aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/kprobes.txt16
-rw-r--r--arch/x86/include/asm/asm.h7
-rw-r--r--arch/x86/include/asm/kprobes.h2
-rw-r--r--arch/x86/include/asm/traps.h2
-rw-r--r--arch/x86/kernel/alternative.c3
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c3
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c3
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/entry_32.S33
-rw-r--r--arch/x86/kernel/entry_64.S20
-rw-r--r--arch/x86/kernel/hw_breakpoint.c5
-rw-r--r--arch/x86/kernel/kprobes/core.c128
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c17
-rw-r--r--arch/x86/kernel/kprobes/opt.c32
-rw-r--r--arch/x86/kernel/kvm.c4
-rw-r--r--arch/x86/kernel/nmi.c18
-rw-r--r--arch/x86/kernel/paravirt.c6
-rw-r--r--arch/x86/kernel/traps.c35
-rw-r--r--arch/x86/lib/thunk_32.S3
-rw-r--r--arch/x86/lib/thunk_64.S3
-rw-r--r--arch/x86/mm/fault.c29
-rw-r--r--include/asm-generic/vmlinux.lds.h10
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/kprobes.h21
-rw-r--r--kernel/kprobes.c392
-rw-r--r--kernel/notifier.c22
-rw-r--r--kernel/sched/core.c7
-rw-r--r--kernel/trace/trace_event_perf.c5
-rw-r--r--kernel/trace/trace_kprobe.c71
-rw-r--r--kernel/trace/trace_probe.c65
-rw-r--r--kernel/trace/trace_probe.h15
-rw-r--r--kernel/trace/trace_uprobe.c20
34 files changed, 583 insertions, 432 deletions
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 0cfb00fd86ff..4bbeca8483ed 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -22,8 +22,9 @@ Appendix B: The kprobes sysctl interface
22 22
23Kprobes enables you to dynamically break into any kernel routine and 23Kprobes enables you to dynamically break into any kernel routine and
24collect debugging and performance information non-disruptively. You 24collect debugging and performance information non-disruptively. You
25can trap at almost any kernel code address, specifying a handler 25can trap at almost any kernel code address(*), specifying a handler
26routine to be invoked when the breakpoint is hit. 26routine to be invoked when the breakpoint is hit.
27(*: some parts of the kernel code cannot be trapped, see 1.5 Blacklist)
27 28
28There are currently three types of probes: kprobes, jprobes, and 29There are currently three types of probes: kprobes, jprobes, and
29kretprobes (also called return probes). A kprobe can be inserted 30kretprobes (also called return probes). A kprobe can be inserted
@@ -273,6 +274,19 @@ using one of the following techniques:
273 or 274 or
274- Execute 'sysctl -w debug.kprobes_optimization=n' 275- Execute 'sysctl -w debug.kprobes_optimization=n'
275 276
2771.5 Blacklist
278
279Kprobes can probe most of the kernel except itself. This means
280that there are some functions that kprobes cannot probe. Probing
281(trapping) such functions can cause a recursive trap (e.g. double
282fault) or the nested probe handler may never be called.
283Kprobes manages such functions as a blacklist.
284If you want to add a function into the blacklist, you just need
285to (1) include linux/kprobes.h and (2) use the NOKPROBE_SYMBOL()
286macro to specify a blacklisted function.
287Kprobes checks the given probe address against the blacklist and
288rejects registering it if the given address is in the blacklist.
289
2762. Architectures Supported 2902. Architectures Supported
277 291
278Kprobes, jprobes, and return probes are implemented on the following 292Kprobes, jprobes, and return probes are implemented on the following
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 4582e8e1cd1a..7730c1c5c83a 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -57,6 +57,12 @@
57 .long (from) - . ; \ 57 .long (from) - . ; \
58 .long (to) - . + 0x7ffffff0 ; \ 58 .long (to) - . + 0x7ffffff0 ; \
59 .popsection 59 .popsection
60
61# define _ASM_NOKPROBE(entry) \
62 .pushsection "_kprobe_blacklist","aw" ; \
63 _ASM_ALIGN ; \
64 _ASM_PTR (entry); \
65 .popsection
60#else 66#else
61# define _ASM_EXTABLE(from,to) \ 67# define _ASM_EXTABLE(from,to) \
62 " .pushsection \"__ex_table\",\"a\"\n" \ 68 " .pushsection \"__ex_table\",\"a\"\n" \
@@ -71,6 +77,7 @@
71 " .long (" #from ") - .\n" \ 77 " .long (" #from ") - .\n" \
72 " .long (" #to ") - . + 0x7ffffff0\n" \ 78 " .long (" #to ") - . + 0x7ffffff0\n" \
73 " .popsection\n" 79 " .popsection\n"
80/* For C file, we already have NOKPROBE_SYMBOL macro */
74#endif 81#endif
75 82
76#endif /* _ASM_X86_ASM_H */ 83#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 9454c167629f..53cdfb2857ab 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -116,4 +116,6 @@ struct kprobe_ctlblk {
116extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 116extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
117extern int kprobe_exceptions_notify(struct notifier_block *self, 117extern int kprobe_exceptions_notify(struct notifier_block *self,
118 unsigned long val, void *data); 118 unsigned long val, void *data);
119extern int kprobe_int3_handler(struct pt_regs *regs);
120extern int kprobe_debug_handler(struct pt_regs *regs);
119#endif /* _ASM_X86_KPROBES_H */ 121#endif /* _ASM_X86_KPROBES_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index a7b212db9e04..cf69d050aa6e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -68,7 +68,7 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
68dotraplinkage void do_stack_segment(struct pt_regs *, long); 68dotraplinkage void do_stack_segment(struct pt_regs *, long);
69#ifdef CONFIG_X86_64 69#ifdef CONFIG_X86_64
70dotraplinkage void do_double_fault(struct pt_regs *, long); 70dotraplinkage void do_double_fault(struct pt_regs *, long);
71asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *); 71asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
72#endif 72#endif
73dotraplinkage void do_general_protection(struct pt_regs *, long); 73dotraplinkage void do_general_protection(struct pt_regs *, long);
74dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); 74dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index df94598ad05a..703130f469ec 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -5,7 +5,6 @@
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/stringify.h> 7#include <linux/stringify.h>
8#include <linux/kprobes.h>
9#include <linux/mm.h> 8#include <linux/mm.h>
10#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
11#include <linux/memory.h> 10#include <linux/memory.h>
@@ -551,7 +550,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
551 * 550 *
552 * Note: Must be called under text_mutex. 551 * Note: Must be called under text_mutex.
553 */ 552 */
554void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 553void *text_poke(void *addr, const void *opcode, size_t len)
555{ 554{
556 unsigned long flags; 555 unsigned long flags;
557 char *vaddr; 556 char *vaddr;
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d7165c96..73eb5b336f63 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -60,7 +60,7 @@ void arch_trigger_all_cpu_backtrace(void)
60 smp_mb__after_clear_bit(); 60 smp_mb__after_clear_bit();
61} 61}
62 62
63static int __kprobes 63static int
64arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) 64arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
65{ 65{
66 int cpu; 66 int cpu;
@@ -80,6 +80,7 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
80 80
81 return NMI_DONE; 81 return NMI_DONE;
82} 82}
83NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
83 84
84static int __init register_trigger_all_cpu_backtrace(void) 85static int __init register_trigger_all_cpu_backtrace(void)
85{ 86{
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a135239badb7..5af696dddd1d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -8,6 +8,7 @@
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/kprobes.h>
11#include <linux/kgdb.h> 12#include <linux/kgdb.h>
12#include <linux/smp.h> 13#include <linux/smp.h>
13#include <linux/io.h> 14#include <linux/io.h>
@@ -1160,6 +1161,7 @@ int is_debug_stack(unsigned long addr)
1160 (addr <= __get_cpu_var(debug_stack_addr) && 1161 (addr <= __get_cpu_var(debug_stack_addr) &&
1161 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); 1162 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
1162} 1163}
1164NOKPROBE_SYMBOL(is_debug_stack);
1163 1165
1164DEFINE_PER_CPU(u32, debug_idt_ctr); 1166DEFINE_PER_CPU(u32, debug_idt_ctr);
1165 1167
@@ -1168,6 +1170,7 @@ void debug_stack_set_zero(void)
1168 this_cpu_inc(debug_idt_ctr); 1170 this_cpu_inc(debug_idt_ctr);
1169 load_current_idt(); 1171 load_current_idt();
1170} 1172}
1173NOKPROBE_SYMBOL(debug_stack_set_zero);
1171 1174
1172void debug_stack_reset(void) 1175void debug_stack_reset(void)
1173{ 1176{
@@ -1176,6 +1179,7 @@ void debug_stack_reset(void)
1176 if (this_cpu_dec_return(debug_idt_ctr) == 0) 1179 if (this_cpu_dec_return(debug_idt_ctr) == 0)
1177 load_current_idt(); 1180 load_current_idt();
1178} 1181}
1182NOKPROBE_SYMBOL(debug_stack_reset);
1179 1183
1180#else /* CONFIG_X86_64 */ 1184#else /* CONFIG_X86_64 */
1181 1185
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 89f3b7c1af20..32029e35f2b9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1293,7 +1293,7 @@ void perf_events_lapic_init(void)
1293 apic_write(APIC_LVTPC, APIC_DM_NMI); 1293 apic_write(APIC_LVTPC, APIC_DM_NMI);
1294} 1294}
1295 1295
1296static int __kprobes 1296static int
1297perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) 1297perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1298{ 1298{
1299 u64 start_clock; 1299 u64 start_clock;
@@ -1311,6 +1311,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1311 1311
1312 return ret; 1312 return ret;
1313} 1313}
1314NOKPROBE_SYMBOL(perf_event_nmi_handler);
1314 1315
1315struct event_constraint emptyconstraint; 1316struct event_constraint emptyconstraint;
1316struct event_constraint unconstrained; 1317struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 4c36bbe3173a..cbb1be3ed9e4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -593,7 +593,7 @@ out:
593 return 1; 593 return 1;
594} 594}
595 595
596static int __kprobes 596static int
597perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs) 597perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
598{ 598{
599 int handled = 0; 599 int handled = 0;
@@ -606,6 +606,7 @@ perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
606 606
607 return handled; 607 return handled;
608} 608}
609NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
609 610
610static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) 611static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
611{ 612{
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index d9c12d3022a7..b74ebc7c4402 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -200,7 +200,7 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
200static int die_owner = -1; 200static int die_owner = -1;
201static unsigned int die_nest_count; 201static unsigned int die_nest_count;
202 202
203unsigned __kprobes long oops_begin(void) 203unsigned long oops_begin(void)
204{ 204{
205 int cpu; 205 int cpu;
206 unsigned long flags; 206 unsigned long flags;
@@ -223,8 +223,9 @@ unsigned __kprobes long oops_begin(void)
223 return flags; 223 return flags;
224} 224}
225EXPORT_SYMBOL_GPL(oops_begin); 225EXPORT_SYMBOL_GPL(oops_begin);
226NOKPROBE_SYMBOL(oops_begin);
226 227
227void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) 228void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
228{ 229{
229 if (regs && kexec_should_crash(current)) 230 if (regs && kexec_should_crash(current))
230 crash_kexec(regs); 231 crash_kexec(regs);
@@ -247,8 +248,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
247 panic("Fatal exception"); 248 panic("Fatal exception");
248 do_exit(signr); 249 do_exit(signr);
249} 250}
251NOKPROBE_SYMBOL(oops_end);
250 252
251int __kprobes __die(const char *str, struct pt_regs *regs, long err) 253int __die(const char *str, struct pt_regs *regs, long err)
252{ 254{
253#ifdef CONFIG_X86_32 255#ifdef CONFIG_X86_32
254 unsigned short ss; 256 unsigned short ss;
@@ -291,6 +293,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
291#endif 293#endif
292 return 0; 294 return 0;
293} 295}
296NOKPROBE_SYMBOL(__die);
294 297
295/* 298/*
296 * This is gone through when something in the kernel has done something bad 299 * This is gone through when something in the kernel has done something bad
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index a2a4f4697889..0ca5bf1697bb 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -315,10 +315,6 @@ ENTRY(ret_from_kernel_thread)
315ENDPROC(ret_from_kernel_thread) 315ENDPROC(ret_from_kernel_thread)
316 316
317/* 317/*
318 * Interrupt exit functions should be protected against kprobes
319 */
320 .pushsection .kprobes.text, "ax"
321/*
322 * Return to user mode is not as complex as all this looks, 318 * Return to user mode is not as complex as all this looks,
323 * but we want the default path for a system call return to 319 * but we want the default path for a system call return to
324 * go as quickly as possible which is why some of this is 320 * go as quickly as possible which is why some of this is
@@ -372,10 +368,6 @@ need_resched:
372END(resume_kernel) 368END(resume_kernel)
373#endif 369#endif
374 CFI_ENDPROC 370 CFI_ENDPROC
375/*
376 * End of kprobes section
377 */
378 .popsection
379 371
380/* SYSENTER_RETURN points to after the "sysenter" instruction in 372/* SYSENTER_RETURN points to after the "sysenter" instruction in
381 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ 373 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
@@ -495,10 +487,6 @@ sysexit_audit:
495 PTGS_TO_GS_EX 487 PTGS_TO_GS_EX
496ENDPROC(ia32_sysenter_target) 488ENDPROC(ia32_sysenter_target)
497 489
498/*
499 * syscall stub including irq exit should be protected against kprobes
500 */
501 .pushsection .kprobes.text, "ax"
502 # system call handler stub 490 # system call handler stub
503ENTRY(system_call) 491ENTRY(system_call)
504 RING0_INT_FRAME # can't unwind into user space anyway 492 RING0_INT_FRAME # can't unwind into user space anyway
@@ -691,10 +679,6 @@ syscall_badsys:
691 jmp resume_userspace 679 jmp resume_userspace
692END(syscall_badsys) 680END(syscall_badsys)
693 CFI_ENDPROC 681 CFI_ENDPROC
694/*
695 * End of kprobes section
696 */
697 .popsection
698 682
699.macro FIXUP_ESPFIX_STACK 683.macro FIXUP_ESPFIX_STACK
700/* 684/*
@@ -781,10 +765,6 @@ common_interrupt:
781ENDPROC(common_interrupt) 765ENDPROC(common_interrupt)
782 CFI_ENDPROC 766 CFI_ENDPROC
783 767
784/*
785 * Irq entries should be protected against kprobes
786 */
787 .pushsection .kprobes.text, "ax"
788#define BUILD_INTERRUPT3(name, nr, fn) \ 768#define BUILD_INTERRUPT3(name, nr, fn) \
789ENTRY(name) \ 769ENTRY(name) \
790 RING0_INT_FRAME; \ 770 RING0_INT_FRAME; \
@@ -961,10 +941,6 @@ ENTRY(spurious_interrupt_bug)
961 jmp error_code 941 jmp error_code
962 CFI_ENDPROC 942 CFI_ENDPROC
963END(spurious_interrupt_bug) 943END(spurious_interrupt_bug)
964/*
965 * End of kprobes section
966 */
967 .popsection
968 944
969#ifdef CONFIG_XEN 945#ifdef CONFIG_XEN
970/* Xen doesn't set %esp to be precisely what the normal sysenter 946/* Xen doesn't set %esp to be precisely what the normal sysenter
@@ -1239,11 +1215,6 @@ return_to_handler:
1239 jmp *%ecx 1215 jmp *%ecx
1240#endif 1216#endif
1241 1217
1242/*
1243 * Some functions should be protected against kprobes
1244 */
1245 .pushsection .kprobes.text, "ax"
1246
1247#ifdef CONFIG_TRACING 1218#ifdef CONFIG_TRACING
1248ENTRY(trace_page_fault) 1219ENTRY(trace_page_fault)
1249 RING0_EC_FRAME 1220 RING0_EC_FRAME
@@ -1453,7 +1424,3 @@ ENTRY(async_page_fault)
1453END(async_page_fault) 1424END(async_page_fault)
1454#endif 1425#endif
1455 1426
1456/*
1457 * End of kprobes section
1458 */
1459 .popsection
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1e96c3628bf2..43bb38951660 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -487,8 +487,6 @@ ENDPROC(native_usergs_sysret64)
487 TRACE_IRQS_OFF 487 TRACE_IRQS_OFF
488 .endm 488 .endm
489 489
490/* save complete stack frame */
491 .pushsection .kprobes.text, "ax"
492ENTRY(save_paranoid) 490ENTRY(save_paranoid)
493 XCPT_FRAME 1 RDI+8 491 XCPT_FRAME 1 RDI+8
494 cld 492 cld
@@ -517,7 +515,6 @@ ENTRY(save_paranoid)
5171: ret 5151: ret
518 CFI_ENDPROC 516 CFI_ENDPROC
519END(save_paranoid) 517END(save_paranoid)
520 .popsection
521 518
522/* 519/*
523 * A newly forked process directly context switches into this address. 520 * A newly forked process directly context switches into this address.
@@ -975,10 +972,6 @@ END(interrupt)
975 call \func 972 call \func
976 .endm 973 .endm
977 974
978/*
979 * Interrupt entry/exit should be protected against kprobes
980 */
981 .pushsection .kprobes.text, "ax"
982 /* 975 /*
983 * The interrupt stubs push (~vector+0x80) onto the stack and 976 * The interrupt stubs push (~vector+0x80) onto the stack and
984 * then jump to common_interrupt. 977 * then jump to common_interrupt.
@@ -1113,10 +1106,6 @@ ENTRY(retint_kernel)
1113 1106
1114 CFI_ENDPROC 1107 CFI_ENDPROC
1115END(common_interrupt) 1108END(common_interrupt)
1116/*
1117 * End of kprobes section
1118 */
1119 .popsection
1120 1109
1121/* 1110/*
1122 * APIC interrupts. 1111 * APIC interrupts.
@@ -1477,11 +1466,6 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1477 hyperv_callback_vector hyperv_vector_handler 1466 hyperv_callback_vector hyperv_vector_handler
1478#endif /* CONFIG_HYPERV */ 1467#endif /* CONFIG_HYPERV */
1479 1468
1480/*
1481 * Some functions should be protected against kprobes
1482 */
1483 .pushsection .kprobes.text, "ax"
1484
1485paranoidzeroentry_ist debug do_debug DEBUG_STACK 1469paranoidzeroentry_ist debug do_debug DEBUG_STACK
1486paranoidzeroentry_ist int3 do_int3 DEBUG_STACK 1470paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
1487paranoiderrorentry stack_segment do_stack_segment 1471paranoiderrorentry stack_segment do_stack_segment
@@ -1898,7 +1882,3 @@ ENTRY(ignore_sysret)
1898 CFI_ENDPROC 1882 CFI_ENDPROC
1899END(ignore_sysret) 1883END(ignore_sysret)
1900 1884
1901/*
1902 * End of kprobes section
1903 */
1904 .popsection
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a67b47c31314..5f9cf20cdb68 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,7 +32,6 @@
32#include <linux/irqflags.h> 32#include <linux/irqflags.h>
33#include <linux/notifier.h> 33#include <linux/notifier.h>
34#include <linux/kallsyms.h> 34#include <linux/kallsyms.h>
35#include <linux/kprobes.h>
36#include <linux/percpu.h> 35#include <linux/percpu.h>
37#include <linux/kdebug.h> 36#include <linux/kdebug.h>
38#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
424 * NOTIFY_STOP returned for all other cases 423 * NOTIFY_STOP returned for all other cases
425 * 424 *
426 */ 425 */
427static int __kprobes hw_breakpoint_handler(struct die_args *args) 426static int hw_breakpoint_handler(struct die_args *args)
428{ 427{
429 int i, cpu, rc = NOTIFY_STOP; 428 int i, cpu, rc = NOTIFY_STOP;
430 struct perf_event *bp; 429 struct perf_event *bp;
@@ -511,7 +510,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
511/* 510/*
512 * Handle debug exception notifications. 511 * Handle debug exception notifications.
513 */ 512 */
514int __kprobes hw_breakpoint_exceptions_notify( 513int hw_breakpoint_exceptions_notify(
515 struct notifier_block *unused, unsigned long val, void *data) 514 struct notifier_block *unused, unsigned long val, void *data)
516{ 515{
517 if (val != DIE_DEBUG) 516 if (val != DIE_DEBUG)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 61b17dc2c277..7596df664901 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -112,7 +112,8 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
112 112
113const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); 113const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
114 114
115static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) 115static nokprobe_inline void
116__synthesize_relative_insn(void *from, void *to, u8 op)
116{ 117{
117 struct __arch_relative_insn { 118 struct __arch_relative_insn {
118 u8 op; 119 u8 op;
@@ -125,21 +126,23 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
125} 126}
126 127
127/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ 128/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
128void __kprobes synthesize_reljump(void *from, void *to) 129void synthesize_reljump(void *from, void *to)
129{ 130{
130 __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); 131 __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
131} 132}
133NOKPROBE_SYMBOL(synthesize_reljump);
132 134
133/* Insert a call instruction at address 'from', which calls address 'to'.*/ 135/* Insert a call instruction at address 'from', which calls address 'to'.*/
134void __kprobes synthesize_relcall(void *from, void *to) 136void synthesize_relcall(void *from, void *to)
135{ 137{
136 __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); 138 __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
137} 139}
140NOKPROBE_SYMBOL(synthesize_relcall);
138 141
139/* 142/*
140 * Skip the prefixes of the instruction. 143 * Skip the prefixes of the instruction.
141 */ 144 */
142static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) 145static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
143{ 146{
144 insn_attr_t attr; 147 insn_attr_t attr;
145 148
@@ -154,12 +157,13 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
154#endif 157#endif
155 return insn; 158 return insn;
156} 159}
160NOKPROBE_SYMBOL(skip_prefixes);
157 161
158/* 162/*
159 * Returns non-zero if opcode is boostable. 163 * Returns non-zero if opcode is boostable.
160 * RIP relative instructions are adjusted at copying time in 64 bits mode 164 * RIP relative instructions are adjusted at copying time in 64 bits mode
161 */ 165 */
162int __kprobes can_boost(kprobe_opcode_t *opcodes) 166int can_boost(kprobe_opcode_t *opcodes)
163{ 167{
164 kprobe_opcode_t opcode; 168 kprobe_opcode_t opcode;
165 kprobe_opcode_t *orig_opcodes = opcodes; 169 kprobe_opcode_t *orig_opcodes = opcodes;
@@ -260,7 +264,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
260} 264}
261 265
262/* Check if paddr is at an instruction boundary */ 266/* Check if paddr is at an instruction boundary */
263static int __kprobes can_probe(unsigned long paddr) 267static int can_probe(unsigned long paddr)
264{ 268{
265 unsigned long addr, __addr, offset = 0; 269 unsigned long addr, __addr, offset = 0;
266 struct insn insn; 270 struct insn insn;
@@ -299,7 +303,7 @@ static int __kprobes can_probe(unsigned long paddr)
299/* 303/*
300 * Returns non-zero if opcode modifies the interrupt flag. 304 * Returns non-zero if opcode modifies the interrupt flag.
301 */ 305 */
302static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) 306static int is_IF_modifier(kprobe_opcode_t *insn)
303{ 307{
304 /* Skip prefixes */ 308 /* Skip prefixes */
305 insn = skip_prefixes(insn); 309 insn = skip_prefixes(insn);
@@ -322,7 +326,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
322 * If not, return null. 326 * If not, return null.
323 * Only applicable to 64-bit x86. 327 * Only applicable to 64-bit x86.
324 */ 328 */
325int __kprobes __copy_instruction(u8 *dest, u8 *src) 329int __copy_instruction(u8 *dest, u8 *src)
326{ 330{
327 struct insn insn; 331 struct insn insn;
328 kprobe_opcode_t buf[MAX_INSN_SIZE]; 332 kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -365,7 +369,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
365 return insn.length; 369 return insn.length;
366} 370}
367 371
368static int __kprobes arch_copy_kprobe(struct kprobe *p) 372static int arch_copy_kprobe(struct kprobe *p)
369{ 373{
370 int ret; 374 int ret;
371 375
@@ -392,7 +396,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
392 return 0; 396 return 0;
393} 397}
394 398
395int __kprobes arch_prepare_kprobe(struct kprobe *p) 399int arch_prepare_kprobe(struct kprobe *p)
396{ 400{
397 if (alternatives_text_reserved(p->addr, p->addr)) 401 if (alternatives_text_reserved(p->addr, p->addr))
398 return -EINVAL; 402 return -EINVAL;
@@ -407,17 +411,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
407 return arch_copy_kprobe(p); 411 return arch_copy_kprobe(p);
408} 412}
409 413
410void __kprobes arch_arm_kprobe(struct kprobe *p) 414void arch_arm_kprobe(struct kprobe *p)
411{ 415{
412 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); 416 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
413} 417}
414 418
415void __kprobes arch_disarm_kprobe(struct kprobe *p) 419void arch_disarm_kprobe(struct kprobe *p)
416{ 420{
417 text_poke(p->addr, &p->opcode, 1); 421 text_poke(p->addr, &p->opcode, 1);
418} 422}
419 423
420void __kprobes arch_remove_kprobe(struct kprobe *p) 424void arch_remove_kprobe(struct kprobe *p)
421{ 425{
422 if (p->ainsn.insn) { 426 if (p->ainsn.insn) {
423 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); 427 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
@@ -425,7 +429,8 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
425 } 429 }
426} 430}
427 431
428static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 432static nokprobe_inline void
433save_previous_kprobe(struct kprobe_ctlblk *kcb)
429{ 434{
430 kcb->prev_kprobe.kp = kprobe_running(); 435 kcb->prev_kprobe.kp = kprobe_running();
431 kcb->prev_kprobe.status = kcb->kprobe_status; 436 kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -433,7 +438,8 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
433 kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; 438 kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
434} 439}
435 440
436static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 441static nokprobe_inline void
442restore_previous_kprobe(struct kprobe_ctlblk *kcb)
437{ 443{
438 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 444 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
439 kcb->kprobe_status = kcb->prev_kprobe.status; 445 kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -441,8 +447,9 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
441 kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; 447 kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
442} 448}
443 449
444static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 450static nokprobe_inline void
445 struct kprobe_ctlblk *kcb) 451set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
452 struct kprobe_ctlblk *kcb)
446{ 453{
447 __this_cpu_write(current_kprobe, p); 454 __this_cpu_write(current_kprobe, p);
448 kcb->kprobe_saved_flags = kcb->kprobe_old_flags 455 kcb->kprobe_saved_flags = kcb->kprobe_old_flags
@@ -451,7 +458,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
451 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; 458 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
452} 459}
453 460
454static void __kprobes clear_btf(void) 461static nokprobe_inline void clear_btf(void)
455{ 462{
456 if (test_thread_flag(TIF_BLOCKSTEP)) { 463 if (test_thread_flag(TIF_BLOCKSTEP)) {
457 unsigned long debugctl = get_debugctlmsr(); 464 unsigned long debugctl = get_debugctlmsr();
@@ -461,7 +468,7 @@ static void __kprobes clear_btf(void)
461 } 468 }
462} 469}
463 470
464static void __kprobes restore_btf(void) 471static nokprobe_inline void restore_btf(void)
465{ 472{
466 if (test_thread_flag(TIF_BLOCKSTEP)) { 473 if (test_thread_flag(TIF_BLOCKSTEP)) {
467 unsigned long debugctl = get_debugctlmsr(); 474 unsigned long debugctl = get_debugctlmsr();
@@ -471,8 +478,7 @@ static void __kprobes restore_btf(void)
471 } 478 }
472} 479}
473 480
474void __kprobes 481void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
475arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
476{ 482{
477 unsigned long *sara = stack_addr(regs); 483 unsigned long *sara = stack_addr(regs);
478 484
@@ -481,9 +487,10 @@ arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
481 /* Replace the return addr with trampoline addr */ 487 /* Replace the return addr with trampoline addr */
482 *sara = (unsigned long) &kretprobe_trampoline; 488 *sara = (unsigned long) &kretprobe_trampoline;
483} 489}
490NOKPROBE_SYMBOL(arch_prepare_kretprobe);
484 491
485static void __kprobes 492static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
486setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) 493 struct kprobe_ctlblk *kcb, int reenter)
487{ 494{
488 if (setup_detour_execution(p, regs, reenter)) 495 if (setup_detour_execution(p, regs, reenter))
489 return; 496 return;
@@ -519,22 +526,24 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
519 else 526 else
520 regs->ip = (unsigned long)p->ainsn.insn; 527 regs->ip = (unsigned long)p->ainsn.insn;
521} 528}
529NOKPROBE_SYMBOL(setup_singlestep);
522 530
523/* 531/*
524 * We have reentered the kprobe_handler(), since another probe was hit while 532 * We have reentered the kprobe_handler(), since another probe was hit while
525 * within the handler. We save the original kprobes variables and just single 533 * within the handler. We save the original kprobes variables and just single
526 * step on the instruction of the new probe without calling any user handlers. 534 * step on the instruction of the new probe without calling any user handlers.
527 */ 535 */
528static int __kprobes 536static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
529reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) 537 struct kprobe_ctlblk *kcb)
530{ 538{
531 switch (kcb->kprobe_status) { 539 switch (kcb->kprobe_status) {
532 case KPROBE_HIT_SSDONE: 540 case KPROBE_HIT_SSDONE:
533 case KPROBE_HIT_ACTIVE: 541 case KPROBE_HIT_ACTIVE:
542 case KPROBE_HIT_SS:
534 kprobes_inc_nmissed_count(p); 543 kprobes_inc_nmissed_count(p);
535 setup_singlestep(p, regs, kcb, 1); 544 setup_singlestep(p, regs, kcb, 1);
536 break; 545 break;
537 case KPROBE_HIT_SS: 546 case KPROBE_REENTER:
538 /* A probe has been hit in the codepath leading up to, or just 547 /* A probe has been hit in the codepath leading up to, or just
539 * after, single-stepping of a probed instruction. This entire 548 * after, single-stepping of a probed instruction. This entire
540 * codepath should strictly reside in .kprobes.text section. 549 * codepath should strictly reside in .kprobes.text section.
@@ -553,12 +562,13 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
553 562
554 return 1; 563 return 1;
555} 564}
565NOKPROBE_SYMBOL(reenter_kprobe);
556 566
557/* 567/*
558 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 568 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
559 * remain disabled throughout this function. 569 * remain disabled throughout this function.
560 */ 570 */
561static int __kprobes kprobe_handler(struct pt_regs *regs) 571int kprobe_int3_handler(struct pt_regs *regs)
562{ 572{
563 kprobe_opcode_t *addr; 573 kprobe_opcode_t *addr;
564 struct kprobe *p; 574 struct kprobe *p;
@@ -621,12 +631,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
621 preempt_enable_no_resched(); 631 preempt_enable_no_resched();
622 return 0; 632 return 0;
623} 633}
634NOKPROBE_SYMBOL(kprobe_int3_handler);
624 635
625/* 636/*
626 * When a retprobed function returns, this code saves registers and 637 * When a retprobed function returns, this code saves registers and
627 * calls trampoline_handler() runs, which calls the kretprobe's handler. 638 * calls trampoline_handler() runs, which calls the kretprobe's handler.
628 */ 639 */
629static void __used __kprobes kretprobe_trampoline_holder(void) 640static void __used kretprobe_trampoline_holder(void)
630{ 641{
631 asm volatile ( 642 asm volatile (
632 ".global kretprobe_trampoline\n" 643 ".global kretprobe_trampoline\n"
@@ -657,11 +668,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
657#endif 668#endif
658 " ret\n"); 669 " ret\n");
659} 670}
671NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
672NOKPROBE_SYMBOL(kretprobe_trampoline);
660 673
661/* 674/*
662 * Called from kretprobe_trampoline 675 * Called from kretprobe_trampoline
663 */ 676 */
664__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) 677__visible __used void *trampoline_handler(struct pt_regs *regs)
665{ 678{
666 struct kretprobe_instance *ri = NULL; 679 struct kretprobe_instance *ri = NULL;
667 struct hlist_head *head, empty_rp; 680 struct hlist_head *head, empty_rp;
@@ -747,6 +760,7 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
747 } 760 }
748 return (void *)orig_ret_address; 761 return (void *)orig_ret_address;
749} 762}
763NOKPROBE_SYMBOL(trampoline_handler);
750 764
751/* 765/*
752 * Called after single-stepping. p->addr is the address of the 766 * Called after single-stepping. p->addr is the address of the
@@ -775,8 +789,8 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
775 * jump instruction after the copied instruction, that jumps to the next 789 * jump instruction after the copied instruction, that jumps to the next
776 * instruction after the probepoint. 790 * instruction after the probepoint.
777 */ 791 */
778static void __kprobes 792static void resume_execution(struct kprobe *p, struct pt_regs *regs,
779resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) 793 struct kprobe_ctlblk *kcb)
780{ 794{
781 unsigned long *tos = stack_addr(regs); 795 unsigned long *tos = stack_addr(regs);
782 unsigned long copy_ip = (unsigned long)p->ainsn.insn; 796 unsigned long copy_ip = (unsigned long)p->ainsn.insn;
@@ -851,12 +865,13 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
851no_change: 865no_change:
852 restore_btf(); 866 restore_btf();
853} 867}
868NOKPROBE_SYMBOL(resume_execution);
854 869
855/* 870/*
856 * Interrupts are disabled on entry as trap1 is an interrupt gate and they 871 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
857 * remain disabled throughout this function. 872 * remain disabled throughout this function.
858 */ 873 */
859static int __kprobes post_kprobe_handler(struct pt_regs *regs) 874int kprobe_debug_handler(struct pt_regs *regs)
860{ 875{
861 struct kprobe *cur = kprobe_running(); 876 struct kprobe *cur = kprobe_running();
862 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 877 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -891,8 +906,9 @@ out:
891 906
892 return 1; 907 return 1;
893} 908}
909NOKPROBE_SYMBOL(kprobe_debug_handler);
894 910
895int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) 911int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
896{ 912{
897 struct kprobe *cur = kprobe_running(); 913 struct kprobe *cur = kprobe_running();
898 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 914 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -949,12 +965,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
949 965
950 return 0; 966 return 0;
951} 967}
968NOKPROBE_SYMBOL(kprobe_fault_handler);
952 969
953/* 970/*
954 * Wrapper routine for handling exceptions. 971 * Wrapper routine for handling exceptions.
955 */ 972 */
956int __kprobes 973int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
957kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) 974 void *data)
958{ 975{
959 struct die_args *args = data; 976 struct die_args *args = data;
960 int ret = NOTIFY_DONE; 977 int ret = NOTIFY_DONE;
@@ -962,22 +979,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
962 if (args->regs && user_mode_vm(args->regs)) 979 if (args->regs && user_mode_vm(args->regs))
963 return ret; 980 return ret;
964 981
965 switch (val) { 982 if (val == DIE_GPF) {
966 case DIE_INT3:
967 if (kprobe_handler(args->regs))
968 ret = NOTIFY_STOP;
969 break;
970 case DIE_DEBUG:
971 if (post_kprobe_handler(args->regs)) {
972 /*
973 * Reset the BS bit in dr6 (pointed by args->err) to
974 * denote completion of processing
975 */
976 (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
977 ret = NOTIFY_STOP;
978 }
979 break;
980 case DIE_GPF:
981 /* 983 /*
982 * To be potentially processing a kprobe fault and to 984 * To be potentially processing a kprobe fault and to
983 * trust the result from kprobe_running(), we have 985 * trust the result from kprobe_running(), we have
@@ -986,14 +988,12 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
986 if (!preemptible() && kprobe_running() && 988 if (!preemptible() && kprobe_running() &&
987 kprobe_fault_handler(args->regs, args->trapnr)) 989 kprobe_fault_handler(args->regs, args->trapnr))
988 ret = NOTIFY_STOP; 990 ret = NOTIFY_STOP;
989 break;
990 default:
991 break;
992 } 991 }
993 return ret; 992 return ret;
994} 993}
994NOKPROBE_SYMBOL(kprobe_exceptions_notify);
995 995
996int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 996int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
997{ 997{
998 struct jprobe *jp = container_of(p, struct jprobe, kp); 998 struct jprobe *jp = container_of(p, struct jprobe, kp);
999 unsigned long addr; 999 unsigned long addr;
@@ -1017,8 +1017,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
1017 regs->ip = (unsigned long)(jp->entry); 1017 regs->ip = (unsigned long)(jp->entry);
1018 return 1; 1018 return 1;
1019} 1019}
1020NOKPROBE_SYMBOL(setjmp_pre_handler);
1020 1021
1021void __kprobes jprobe_return(void) 1022void jprobe_return(void)
1022{ 1023{
1023 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1024 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1024 1025
@@ -1034,8 +1035,10 @@ void __kprobes jprobe_return(void)
1034 " nop \n"::"b" 1035 " nop \n"::"b"
1035 (kcb->jprobe_saved_sp):"memory"); 1036 (kcb->jprobe_saved_sp):"memory");
1036} 1037}
1038NOKPROBE_SYMBOL(jprobe_return);
1039NOKPROBE_SYMBOL(jprobe_return_end);
1037 1040
1038int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 1041int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1039{ 1042{
1040 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1043 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1041 u8 *addr = (u8 *) (regs->ip - 1); 1044 u8 *addr = (u8 *) (regs->ip - 1);
@@ -1063,13 +1066,22 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1063 } 1066 }
1064 return 0; 1067 return 0;
1065} 1068}
1069NOKPROBE_SYMBOL(longjmp_break_handler);
1070
1071bool arch_within_kprobe_blacklist(unsigned long addr)
1072{
1073 return (addr >= (unsigned long)__kprobes_text_start &&
1074 addr < (unsigned long)__kprobes_text_end) ||
1075 (addr >= (unsigned long)__entry_text_start &&
1076 addr < (unsigned long)__entry_text_end);
1077}
1066 1078
1067int __init arch_init_kprobes(void) 1079int __init arch_init_kprobes(void)
1068{ 1080{
1069 return 0; 1081 return 0;
1070} 1082}
1071 1083
1072int __kprobes arch_trampoline_kprobe(struct kprobe *p) 1084int arch_trampoline_kprobe(struct kprobe *p)
1073{ 1085{
1074 return 0; 1086 return 0;
1075} 1087}
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 23ef5c556f06..717b02a22e67 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,8 +25,9 @@
25 25
26#include "common.h" 26#include "common.h"
27 27
28static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, 28static nokprobe_inline
29 struct kprobe_ctlblk *kcb) 29int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
30 struct kprobe_ctlblk *kcb)
30{ 31{
31 /* 32 /*
32 * Emulate singlestep (and also recover regs->ip) 33 * Emulate singlestep (and also recover regs->ip)
@@ -41,18 +42,19 @@ static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
41 return 1; 42 return 1;
42} 43}
43 44
44int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, 45int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
45 struct kprobe_ctlblk *kcb) 46 struct kprobe_ctlblk *kcb)
46{ 47{
47 if (kprobe_ftrace(p)) 48 if (kprobe_ftrace(p))
48 return __skip_singlestep(p, regs, kcb); 49 return __skip_singlestep(p, regs, kcb);
49 else 50 else
50 return 0; 51 return 0;
51} 52}
53NOKPROBE_SYMBOL(skip_singlestep);
52 54
53/* Ftrace callback handler for kprobes */ 55/* Ftrace callback handler for kprobes */
54void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 56void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
55 struct ftrace_ops *ops, struct pt_regs *regs) 57 struct ftrace_ops *ops, struct pt_regs *regs)
56{ 58{
57 struct kprobe *p; 59 struct kprobe *p;
58 struct kprobe_ctlblk *kcb; 60 struct kprobe_ctlblk *kcb;
@@ -84,8 +86,9 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
84end: 86end:
85 local_irq_restore(flags); 87 local_irq_restore(flags);
86} 88}
89NOKPROBE_SYMBOL(kprobe_ftrace_handler);
87 90
88int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) 91int arch_prepare_kprobe_ftrace(struct kprobe *p)
89{ 92{
90 p->ainsn.insn = NULL; 93 p->ainsn.insn = NULL;
91 p->ainsn.boostable = -1; 94 p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 898160b42e43..f304773285ae 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -77,7 +77,7 @@ found:
77} 77}
78 78
79/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ 79/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
80static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) 80static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
81{ 81{
82#ifdef CONFIG_X86_64 82#ifdef CONFIG_X86_64
83 *addr++ = 0x48; 83 *addr++ = 0x48;
@@ -138,7 +138,8 @@ asm (
138#define INT3_SIZE sizeof(kprobe_opcode_t) 138#define INT3_SIZE sizeof(kprobe_opcode_t)
139 139
140/* Optimized kprobe call back function: called from optinsn */ 140/* Optimized kprobe call back function: called from optinsn */
141static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) 141static void
142optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
142{ 143{
143 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 144 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
144 unsigned long flags; 145 unsigned long flags;
@@ -168,8 +169,9 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
168 } 169 }
169 local_irq_restore(flags); 170 local_irq_restore(flags);
170} 171}
172NOKPROBE_SYMBOL(optimized_callback);
171 173
172static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) 174static int copy_optimized_instructions(u8 *dest, u8 *src)
173{ 175{
174 int len = 0, ret; 176 int len = 0, ret;
175 177
@@ -189,7 +191,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
189} 191}
190 192
191/* Check whether insn is indirect jump */ 193/* Check whether insn is indirect jump */
192static int __kprobes insn_is_indirect_jump(struct insn *insn) 194static int insn_is_indirect_jump(struct insn *insn)
193{ 195{
194 return ((insn->opcode.bytes[0] == 0xff && 196 return ((insn->opcode.bytes[0] == 0xff &&
195 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ 197 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -224,7 +226,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
224} 226}
225 227
226/* Decode whole function to ensure any instructions don't jump into target */ 228/* Decode whole function to ensure any instructions don't jump into target */
227static int __kprobes can_optimize(unsigned long paddr) 229static int can_optimize(unsigned long paddr)
228{ 230{
229 unsigned long addr, size = 0, offset = 0; 231 unsigned long addr, size = 0, offset = 0;
230 struct insn insn; 232 struct insn insn;
@@ -275,7 +277,7 @@ static int __kprobes can_optimize(unsigned long paddr)
275} 277}
276 278
277/* Check optimized_kprobe can actually be optimized. */ 279/* Check optimized_kprobe can actually be optimized. */
278int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) 280int arch_check_optimized_kprobe(struct optimized_kprobe *op)
279{ 281{
280 int i; 282 int i;
281 struct kprobe *p; 283 struct kprobe *p;
@@ -290,15 +292,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
290} 292}
291 293
292/* Check the addr is within the optimized instructions. */ 294/* Check the addr is within the optimized instructions. */
293int __kprobes 295int arch_within_optimized_kprobe(struct optimized_kprobe *op,
294arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) 296 unsigned long addr)
295{ 297{
296 return ((unsigned long)op->kp.addr <= addr && 298 return ((unsigned long)op->kp.addr <= addr &&
297 (unsigned long)op->kp.addr + op->optinsn.size > addr); 299 (unsigned long)op->kp.addr + op->optinsn.size > addr);
298} 300}
299 301
300/* Free optimized instruction slot */ 302/* Free optimized instruction slot */
301static __kprobes 303static
302void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) 304void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
303{ 305{
304 if (op->optinsn.insn) { 306 if (op->optinsn.insn) {
@@ -308,7 +310,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
308 } 310 }
309} 311}
310 312
311void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) 313void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
312{ 314{
313 __arch_remove_optimized_kprobe(op, 1); 315 __arch_remove_optimized_kprobe(op, 1);
314} 316}
@@ -318,7 +320,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
318 * Target instructions MUST be relocatable (checked inside) 320 * Target instructions MUST be relocatable (checked inside)
319 * This is called when new aggr(opt)probe is allocated or reused. 321 * This is called when new aggr(opt)probe is allocated or reused.
320 */ 322 */
321int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) 323int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
322{ 324{
323 u8 *buf; 325 u8 *buf;
324 int ret; 326 int ret;
@@ -372,7 +374,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
372 * Replace breakpoints (int3) with relative jumps. 374 * Replace breakpoints (int3) with relative jumps.
373 * Caller must call with locking kprobe_mutex and text_mutex. 375 * Caller must call with locking kprobe_mutex and text_mutex.
374 */ 376 */
375void __kprobes arch_optimize_kprobes(struct list_head *oplist) 377void arch_optimize_kprobes(struct list_head *oplist)
376{ 378{
377 struct optimized_kprobe *op, *tmp; 379 struct optimized_kprobe *op, *tmp;
378 u8 insn_buf[RELATIVEJUMP_SIZE]; 380 u8 insn_buf[RELATIVEJUMP_SIZE];
@@ -398,7 +400,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
398} 400}
399 401
400/* Replace a relative jump with a breakpoint (int3). */ 402/* Replace a relative jump with a breakpoint (int3). */
401void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) 403void arch_unoptimize_kprobe(struct optimized_kprobe *op)
402{ 404{
403 u8 insn_buf[RELATIVEJUMP_SIZE]; 405 u8 insn_buf[RELATIVEJUMP_SIZE];
404 406
@@ -424,8 +426,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
424 } 426 }
425} 427}
426 428
427int __kprobes 429int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
428setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
429{ 430{
430 struct optimized_kprobe *op; 431 struct optimized_kprobe *op;
431 432
@@ -441,3 +442,4 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
441 } 442 }
442 return 0; 443 return 0;
443} 444}
445NOKPROBE_SYMBOL(setup_detour_execution);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 0331cb389d68..d81abcbfe501 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -251,8 +251,9 @@ u32 kvm_read_and_reset_pf_reason(void)
251 return reason; 251 return reason;
252} 252}
253EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); 253EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
254NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
254 255
255dotraplinkage void __kprobes 256dotraplinkage void
256do_async_page_fault(struct pt_regs *regs, unsigned long error_code) 257do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
257{ 258{
258 enum ctx_state prev_state; 259 enum ctx_state prev_state;
@@ -276,6 +277,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
276 break; 277 break;
277 } 278 }
278} 279}
280NOKPROBE_SYMBOL(do_async_page_fault);
279 281
280static void __init paravirt_ops_setup(void) 282static void __init paravirt_ops_setup(void)
281{ 283{
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index b4872b999a71..c3e985d1751c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w)
110 a->handler, whole_msecs, decimal_msecs); 110 a->handler, whole_msecs, decimal_msecs);
111} 111}
112 112
113static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) 113static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
114{ 114{
115 struct nmi_desc *desc = nmi_to_desc(type); 115 struct nmi_desc *desc = nmi_to_desc(type);
116 struct nmiaction *a; 116 struct nmiaction *a;
@@ -146,6 +146,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
146 /* return total number of NMI events handled */ 146 /* return total number of NMI events handled */
147 return handled; 147 return handled;
148} 148}
149NOKPROBE_SYMBOL(nmi_handle);
149 150
150int __register_nmi_handler(unsigned int type, struct nmiaction *action) 151int __register_nmi_handler(unsigned int type, struct nmiaction *action)
151{ 152{
@@ -208,7 +209,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
208} 209}
209EXPORT_SYMBOL_GPL(unregister_nmi_handler); 210EXPORT_SYMBOL_GPL(unregister_nmi_handler);
210 211
211static __kprobes void 212static void
212pci_serr_error(unsigned char reason, struct pt_regs *regs) 213pci_serr_error(unsigned char reason, struct pt_regs *regs)
213{ 214{
214 /* check to see if anyone registered against these types of errors */ 215 /* check to see if anyone registered against these types of errors */
@@ -238,8 +239,9 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
238 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; 239 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
239 outb(reason, NMI_REASON_PORT); 240 outb(reason, NMI_REASON_PORT);
240} 241}
242NOKPROBE_SYMBOL(pci_serr_error);
241 243
242static __kprobes void 244static void
243io_check_error(unsigned char reason, struct pt_regs *regs) 245io_check_error(unsigned char reason, struct pt_regs *regs)
244{ 246{
245 unsigned long i; 247 unsigned long i;
@@ -269,8 +271,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
269 reason &= ~NMI_REASON_CLEAR_IOCHK; 271 reason &= ~NMI_REASON_CLEAR_IOCHK;
270 outb(reason, NMI_REASON_PORT); 272 outb(reason, NMI_REASON_PORT);
271} 273}
274NOKPROBE_SYMBOL(io_check_error);
272 275
273static __kprobes void 276static void
274unknown_nmi_error(unsigned char reason, struct pt_regs *regs) 277unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
275{ 278{
276 int handled; 279 int handled;
@@ -298,11 +301,12 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
298 301
299 pr_emerg("Dazed and confused, but trying to continue\n"); 302 pr_emerg("Dazed and confused, but trying to continue\n");
300} 303}
304NOKPROBE_SYMBOL(unknown_nmi_error);
301 305
302static DEFINE_PER_CPU(bool, swallow_nmi); 306static DEFINE_PER_CPU(bool, swallow_nmi);
303static DEFINE_PER_CPU(unsigned long, last_nmi_rip); 307static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
304 308
305static __kprobes void default_do_nmi(struct pt_regs *regs) 309static void default_do_nmi(struct pt_regs *regs)
306{ 310{
307 unsigned char reason = 0; 311 unsigned char reason = 0;
308 int handled; 312 int handled;
@@ -401,6 +405,7 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
401 else 405 else
402 unknown_nmi_error(reason, regs); 406 unknown_nmi_error(reason, regs);
403} 407}
408NOKPROBE_SYMBOL(default_do_nmi);
404 409
405/* 410/*
406 * NMIs can hit breakpoints which will cause it to lose its 411 * NMIs can hit breakpoints which will cause it to lose its
@@ -520,7 +525,7 @@ static inline void nmi_nesting_postprocess(void)
520} 525}
521#endif 526#endif
522 527
523dotraplinkage notrace __kprobes void 528dotraplinkage notrace void
524do_nmi(struct pt_regs *regs, long error_code) 529do_nmi(struct pt_regs *regs, long error_code)
525{ 530{
526 nmi_nesting_preprocess(regs); 531 nmi_nesting_preprocess(regs);
@@ -537,6 +542,7 @@ do_nmi(struct pt_regs *regs, long error_code)
537 /* On i386, may loop back to preprocess */ 542 /* On i386, may loop back to preprocess */
538 nmi_nesting_postprocess(); 543 nmi_nesting_postprocess();
539} 544}
545NOKPROBE_SYMBOL(do_nmi);
540 546
541void stop_nmi(void) 547void stop_nmi(void)
542{ 548{
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b10af835c31..548d25f00c90 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -23,6 +23,7 @@
23#include <linux/efi.h> 23#include <linux/efi.h>
24#include <linux/bcd.h> 24#include <linux/bcd.h>
25#include <linux/highmem.h> 25#include <linux/highmem.h>
26#include <linux/kprobes.h>
26 27
27#include <asm/bug.h> 28#include <asm/bug.h>
28#include <asm/paravirt.h> 29#include <asm/paravirt.h>
@@ -389,6 +390,11 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
389 .end_context_switch = paravirt_nop, 390 .end_context_switch = paravirt_nop,
390}; 391};
391 392
393/* At this point, native_get/set_debugreg has real function entries */
394NOKPROBE_SYMBOL(native_get_debugreg);
395NOKPROBE_SYMBOL(native_set_debugreg);
396NOKPROBE_SYMBOL(native_load_idt);
397
392struct pv_apic_ops pv_apic_ops = { 398struct pv_apic_ops pv_apic_ops = {
393#ifdef CONFIG_X86_LOCAL_APIC 399#ifdef CONFIG_X86_LOCAL_APIC
394 .startup_ipi_hook = paravirt_nop, 400 .startup_ipi_hook = paravirt_nop,
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3fdb20548c4b..461926b0577c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -107,7 +107,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
107 preempt_count_dec(); 107 preempt_count_dec();
108} 108}
109 109
110static int __kprobes 110static nokprobe_inline int
111do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, 111do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
112 struct pt_regs *regs, long error_code) 112 struct pt_regs *regs, long error_code)
113{ 113{
@@ -168,7 +168,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
168 return info; 168 return info;
169} 169}
170 170
171static void __kprobes 171static void
172do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, 172do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
173 long error_code, siginfo_t *info) 173 long error_code, siginfo_t *info)
174{ 174{
@@ -202,6 +202,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
202 202
203 force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); 203 force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
204} 204}
205NOKPROBE_SYMBOL(do_trap);
205 206
206static void do_error_trap(struct pt_regs *regs, long error_code, char *str, 207static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
207 unsigned long trapnr, int signr) 208 unsigned long trapnr, int signr)
@@ -277,7 +278,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
277} 278}
278#endif 279#endif
279 280
280dotraplinkage void __kprobes 281dotraplinkage void
281do_general_protection(struct pt_regs *regs, long error_code) 282do_general_protection(struct pt_regs *regs, long error_code)
282{ 283{
283 struct task_struct *tsk; 284 struct task_struct *tsk;
@@ -323,9 +324,10 @@ do_general_protection(struct pt_regs *regs, long error_code)
323exit: 324exit:
324 exception_exit(prev_state); 325 exception_exit(prev_state);
325} 326}
327NOKPROBE_SYMBOL(do_general_protection);
326 328
327/* May run on IST stack. */ 329/* May run on IST stack. */
328dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 330dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
329{ 331{
330 enum ctx_state prev_state; 332 enum ctx_state prev_state;
331 333
@@ -341,13 +343,18 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
341 if (poke_int3_handler(regs)) 343 if (poke_int3_handler(regs))
342 return; 344 return;
343 345
344 prev_state = exception_enter();
345#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 346#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
346 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 347 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
347 SIGTRAP) == NOTIFY_STOP) 348 SIGTRAP) == NOTIFY_STOP)
348 goto exit; 349 goto exit;
349#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 350#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
350 351
352#ifdef CONFIG_KPROBES
353 if (kprobe_int3_handler(regs))
354 return;
355#endif
356 prev_state = exception_enter();
357
351 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 358 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
352 SIGTRAP) == NOTIFY_STOP) 359 SIGTRAP) == NOTIFY_STOP)
353 goto exit; 360 goto exit;
@@ -364,6 +371,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
364exit: 371exit:
365 exception_exit(prev_state); 372 exception_exit(prev_state);
366} 373}
374NOKPROBE_SYMBOL(do_int3);
367 375
368#ifdef CONFIG_X86_64 376#ifdef CONFIG_X86_64
369/* 377/*
@@ -371,7 +379,7 @@ exit:
371 * for scheduling or signal handling. The actual stack switch is done in 379 * for scheduling or signal handling. The actual stack switch is done in
372 * entry.S 380 * entry.S
373 */ 381 */
374asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) 382asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
375{ 383{
376 struct pt_regs *regs = eregs; 384 struct pt_regs *regs = eregs;
377 /* Did already sync */ 385 /* Did already sync */
@@ -390,6 +398,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
390 *regs = *eregs; 398 *regs = *eregs;
391 return regs; 399 return regs;
392} 400}
401NOKPROBE_SYMBOL(sync_regs);
393#endif 402#endif
394 403
395/* 404/*
@@ -416,7 +425,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
416 * 425 *
417 * May run on IST stack. 426 * May run on IST stack.
418 */ 427 */
419dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 428dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
420{ 429{
421 struct task_struct *tsk = current; 430 struct task_struct *tsk = current;
422 enum ctx_state prev_state; 431 enum ctx_state prev_state;
@@ -424,8 +433,6 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
424 unsigned long dr6; 433 unsigned long dr6;
425 int si_code; 434 int si_code;
426 435
427 prev_state = exception_enter();
428
429 get_debugreg(dr6, 6); 436 get_debugreg(dr6, 6);
430 437
431 /* Filter out all the reserved bits which are preset to 1 */ 438 /* Filter out all the reserved bits which are preset to 1 */
@@ -454,6 +461,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
454 /* Store the virtualized DR6 value */ 461 /* Store the virtualized DR6 value */
455 tsk->thread.debugreg6 = dr6; 462 tsk->thread.debugreg6 = dr6;
456 463
464#ifdef CONFIG_KPROBES
465 if (kprobe_debug_handler(regs))
466 goto exit;
467#endif
468 prev_state = exception_enter();
469
457 if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, 470 if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
458 SIGTRAP) == NOTIFY_STOP) 471 SIGTRAP) == NOTIFY_STOP)
459 goto exit; 472 goto exit;
@@ -496,6 +509,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
496exit: 509exit:
497 exception_exit(prev_state); 510 exception_exit(prev_state);
498} 511}
512NOKPROBE_SYMBOL(do_debug);
499 513
500/* 514/*
501 * Note that we play around with the 'TS' bit in an attempt to get 515 * Note that we play around with the 'TS' bit in an attempt to get
@@ -667,7 +681,7 @@ void math_state_restore(void)
667} 681}
668EXPORT_SYMBOL_GPL(math_state_restore); 682EXPORT_SYMBOL_GPL(math_state_restore);
669 683
670dotraplinkage void __kprobes 684dotraplinkage void
671do_device_not_available(struct pt_regs *regs, long error_code) 685do_device_not_available(struct pt_regs *regs, long error_code)
672{ 686{
673 enum ctx_state prev_state; 687 enum ctx_state prev_state;
@@ -693,6 +707,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
693#endif 707#endif
694 exception_exit(prev_state); 708 exception_exit(prev_state);
695} 709}
710NOKPROBE_SYMBOL(do_device_not_available);
696 711
697#ifdef CONFIG_X86_32 712#ifdef CONFIG_X86_32
698dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 713dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 2930ae05d773..28f85c916712 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -4,8 +4,8 @@
4 * (inspired by Andi Kleen's thunk_64.S) 4 * (inspired by Andi Kleen's thunk_64.S)
5 * Subject to the GNU public license, v.2. No warranty of any kind. 5 * Subject to the GNU public license, v.2. No warranty of any kind.
6 */ 6 */
7
8 #include <linux/linkage.h> 7 #include <linux/linkage.h>
8 #include <asm/asm.h>
9 9
10#ifdef CONFIG_TRACE_IRQFLAGS 10#ifdef CONFIG_TRACE_IRQFLAGS
11 /* put return address in eax (arg1) */ 11 /* put return address in eax (arg1) */
@@ -22,6 +22,7 @@
22 popl %ecx 22 popl %ecx
23 popl %eax 23 popl %eax
24 ret 24 ret
25 _ASM_NOKPROBE(\name)
25 .endm 26 .endm
26 27
27 thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller 28 thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index a63efd6bb6a5..92d9feaff42b 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -8,6 +8,7 @@
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h> 9#include <asm/dwarf2.h>
10#include <asm/calling.h> 10#include <asm/calling.h>
11#include <asm/asm.h>
11 12
12 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ 13 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
13 .macro THUNK name, func, put_ret_addr_in_rdi=0 14 .macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -25,6 +26,7 @@
25 call \func 26 call \func
26 jmp restore 27 jmp restore
27 CFI_ENDPROC 28 CFI_ENDPROC
29 _ASM_NOKPROBE(\name)
28 .endm 30 .endm
29 31
30#ifdef CONFIG_TRACE_IRQFLAGS 32#ifdef CONFIG_TRACE_IRQFLAGS
@@ -43,3 +45,4 @@ restore:
43 RESTORE_ARGS 45 RESTORE_ARGS
44 ret 46 ret
45 CFI_ENDPROC 47 CFI_ENDPROC
48 _ASM_NOKPROBE(restore)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8e5722992677..f83bd0de5eef 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -8,7 +8,7 @@
8#include <linux/kdebug.h> /* oops_begin/end, ... */ 8#include <linux/kdebug.h> /* oops_begin/end, ... */
9#include <linux/module.h> /* search_exception_table */ 9#include <linux/module.h> /* search_exception_table */
10#include <linux/bootmem.h> /* max_low_pfn */ 10#include <linux/bootmem.h> /* max_low_pfn */
11#include <linux/kprobes.h> /* __kprobes, ... */ 11#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
12#include <linux/mmiotrace.h> /* kmmio_handler, ... */ 12#include <linux/mmiotrace.h> /* kmmio_handler, ... */
13#include <linux/perf_event.h> /* perf_sw_event */ 13#include <linux/perf_event.h> /* perf_sw_event */
14#include <linux/hugetlb.h> /* hstate_index_to_shift */ 14#include <linux/hugetlb.h> /* hstate_index_to_shift */
@@ -45,7 +45,7 @@ enum x86_pf_error_code {
45 * Returns 0 if mmiotrace is disabled, or if the fault is not 45 * Returns 0 if mmiotrace is disabled, or if the fault is not
46 * handled by mmiotrace: 46 * handled by mmiotrace:
47 */ 47 */
48static inline int __kprobes 48static nokprobe_inline int
49kmmio_fault(struct pt_regs *regs, unsigned long addr) 49kmmio_fault(struct pt_regs *regs, unsigned long addr)
50{ 50{
51 if (unlikely(is_kmmio_active())) 51 if (unlikely(is_kmmio_active()))
@@ -54,7 +54,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
54 return 0; 54 return 0;
55} 55}
56 56
57static inline int __kprobes kprobes_fault(struct pt_regs *regs) 57static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
58{ 58{
59 int ret = 0; 59 int ret = 0;
60 60
@@ -261,7 +261,7 @@ void vmalloc_sync_all(void)
261 * 261 *
262 * Handle a fault on the vmalloc or module mapping area 262 * Handle a fault on the vmalloc or module mapping area
263 */ 263 */
264static noinline __kprobes int vmalloc_fault(unsigned long address) 264static noinline int vmalloc_fault(unsigned long address)
265{ 265{
266 unsigned long pgd_paddr; 266 unsigned long pgd_paddr;
267 pmd_t *pmd_k; 267 pmd_t *pmd_k;
@@ -291,6 +291,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
291 291
292 return 0; 292 return 0;
293} 293}
294NOKPROBE_SYMBOL(vmalloc_fault);
294 295
295/* 296/*
296 * Did it hit the DOS screen memory VA from vm86 mode? 297 * Did it hit the DOS screen memory VA from vm86 mode?
@@ -358,7 +359,7 @@ void vmalloc_sync_all(void)
358 * 359 *
359 * This assumes no large pages in there. 360 * This assumes no large pages in there.
360 */ 361 */
361static noinline __kprobes int vmalloc_fault(unsigned long address) 362static noinline int vmalloc_fault(unsigned long address)
362{ 363{
363 pgd_t *pgd, *pgd_ref; 364 pgd_t *pgd, *pgd_ref;
364 pud_t *pud, *pud_ref; 365 pud_t *pud, *pud_ref;
@@ -425,6 +426,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
425 426
426 return 0; 427 return 0;
427} 428}
429NOKPROBE_SYMBOL(vmalloc_fault);
428 430
429#ifdef CONFIG_CPU_SUP_AMD 431#ifdef CONFIG_CPU_SUP_AMD
430static const char errata93_warning[] = 432static const char errata93_warning[] =
@@ -927,7 +929,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
927 * There are no security implications to leaving a stale TLB when 929 * There are no security implications to leaving a stale TLB when
928 * increasing the permissions on a page. 930 * increasing the permissions on a page.
929 */ 931 */
930static noinline __kprobes int 932static noinline int
931spurious_fault(unsigned long error_code, unsigned long address) 933spurious_fault(unsigned long error_code, unsigned long address)
932{ 934{
933 pgd_t *pgd; 935 pgd_t *pgd;
@@ -975,6 +977,7 @@ spurious_fault(unsigned long error_code, unsigned long address)
975 977
976 return ret; 978 return ret;
977} 979}
980NOKPROBE_SYMBOL(spurious_fault);
978 981
979int show_unhandled_signals = 1; 982int show_unhandled_signals = 1;
980 983
@@ -1030,7 +1033,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
1030 * {,trace_}do_page_fault() have notrace on. Having this an actual function 1033 * {,trace_}do_page_fault() have notrace on. Having this an actual function
1031 * guarantees there's a function trace entry. 1034 * guarantees there's a function trace entry.
1032 */ 1035 */
1033static void __kprobes noinline 1036static noinline void
1034__do_page_fault(struct pt_regs *regs, unsigned long error_code, 1037__do_page_fault(struct pt_regs *regs, unsigned long error_code,
1035 unsigned long address) 1038 unsigned long address)
1036{ 1039{
@@ -1253,8 +1256,9 @@ good_area:
1253 1256
1254 up_read(&mm->mmap_sem); 1257 up_read(&mm->mmap_sem);
1255} 1258}
1259NOKPROBE_SYMBOL(__do_page_fault);
1256 1260
1257dotraplinkage void __kprobes notrace 1261dotraplinkage void notrace
1258do_page_fault(struct pt_regs *regs, unsigned long error_code) 1262do_page_fault(struct pt_regs *regs, unsigned long error_code)
1259{ 1263{
1260 unsigned long address = read_cr2(); /* Get the faulting address */ 1264 unsigned long address = read_cr2(); /* Get the faulting address */
@@ -1272,10 +1276,12 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
1272 __do_page_fault(regs, error_code, address); 1276 __do_page_fault(regs, error_code, address);
1273 exception_exit(prev_state); 1277 exception_exit(prev_state);
1274} 1278}
1279NOKPROBE_SYMBOL(do_page_fault);
1275 1280
1276#ifdef CONFIG_TRACING 1281#ifdef CONFIG_TRACING
1277static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, 1282static nokprobe_inline void
1278 unsigned long error_code) 1283trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1284 unsigned long error_code)
1279{ 1285{
1280 if (user_mode(regs)) 1286 if (user_mode(regs))
1281 trace_page_fault_user(address, regs, error_code); 1287 trace_page_fault_user(address, regs, error_code);
@@ -1283,7 +1289,7 @@ static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs
1283 trace_page_fault_kernel(address, regs, error_code); 1289 trace_page_fault_kernel(address, regs, error_code);
1284} 1290}
1285 1291
1286dotraplinkage void __kprobes notrace 1292dotraplinkage void notrace
1287trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) 1293trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
1288{ 1294{
1289 /* 1295 /*
@@ -1300,4 +1306,5 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
1300 __do_page_fault(regs, error_code, address); 1306 __do_page_fault(regs, error_code, address);
1301 exception_exit(prev_state); 1307 exception_exit(prev_state);
1302} 1308}
1309NOKPROBE_SYMBOL(trace_do_page_fault);
1303#endif /* CONFIG_TRACING */ 1310#endif /* CONFIG_TRACING */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 146e4fffd710..8e0204a68c74 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -109,6 +109,15 @@
109#define BRANCH_PROFILE() 109#define BRANCH_PROFILE()
110#endif 110#endif
111 111
112#ifdef CONFIG_KPROBES
113#define KPROBE_BLACKLIST() . = ALIGN(8); \
114 VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
115 *(_kprobe_blacklist) \
116 VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
117#else
118#define KPROBE_BLACKLIST()
119#endif
120
112#ifdef CONFIG_EVENT_TRACING 121#ifdef CONFIG_EVENT_TRACING
113#define FTRACE_EVENTS() . = ALIGN(8); \ 122#define FTRACE_EVENTS() . = ALIGN(8); \
114 VMLINUX_SYMBOL(__start_ftrace_events) = .; \ 123 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
@@ -507,6 +516,7 @@
507 *(.init.rodata) \ 516 *(.init.rodata) \
508 FTRACE_EVENTS() \ 517 FTRACE_EVENTS() \
509 TRACE_SYSCALLS() \ 518 TRACE_SYSCALLS() \
519 KPROBE_BLACKLIST() \
510 MEM_DISCARD(init.rodata) \ 520 MEM_DISCARD(init.rodata) \
511 CLK_OF_TABLES() \ 521 CLK_OF_TABLES() \
512 RESERVEDMEM_OF_TABLES() \ 522 RESERVEDMEM_OF_TABLES() \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index ee7239ea1583..0300c0f5c88b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -374,7 +374,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
374/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ 374/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
375#ifdef CONFIG_KPROBES 375#ifdef CONFIG_KPROBES
376# define __kprobes __attribute__((__section__(".kprobes.text"))) 376# define __kprobes __attribute__((__section__(".kprobes.text")))
377# define nokprobe_inline __always_inline
377#else 378#else
378# define __kprobes 379# define __kprobes
380# define nokprobe_inline inline
379#endif 381#endif
380#endif /* __LINUX_COMPILER_H */ 382#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 925eaf28fca9..e059507c465d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -205,10 +205,10 @@ struct kretprobe_blackpoint {
205 void *addr; 205 void *addr;
206}; 206};
207 207
208struct kprobe_blackpoint { 208struct kprobe_blacklist_entry {
209 const char *name; 209 struct list_head list;
210 unsigned long start_addr; 210 unsigned long start_addr;
211 unsigned long range; 211 unsigned long end_addr;
212}; 212};
213 213
214#ifdef CONFIG_KPROBES 214#ifdef CONFIG_KPROBES
@@ -265,6 +265,7 @@ extern void arch_disarm_kprobe(struct kprobe *p);
265extern int arch_init_kprobes(void); 265extern int arch_init_kprobes(void);
266extern void show_registers(struct pt_regs *regs); 266extern void show_registers(struct pt_regs *regs);
267extern void kprobes_inc_nmissed_count(struct kprobe *p); 267extern void kprobes_inc_nmissed_count(struct kprobe *p);
268extern bool arch_within_kprobe_blacklist(unsigned long addr);
268 269
269struct kprobe_insn_cache { 270struct kprobe_insn_cache {
270 struct mutex mutex; 271 struct mutex mutex;
@@ -476,4 +477,18 @@ static inline int enable_jprobe(struct jprobe *jp)
476 return enable_kprobe(&jp->kp); 477 return enable_kprobe(&jp->kp);
477} 478}
478 479
480#ifdef CONFIG_KPROBES
481/*
482 * Blacklist ganerating macro. Specify functions which is not probed
483 * by using this macro.
484 */
485#define __NOKPROBE_SYMBOL(fname) \
486static unsigned long __used \
487 __attribute__((section("_kprobe_blacklist"))) \
488 _kbl_addr_##fname = (unsigned long)fname;
489#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
490#else
491#define NOKPROBE_SYMBOL(fname)
492#endif
493
479#endif /* _LINUX_KPROBES_H */ 494#endif /* _LINUX_KPROBES_H */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ceeadfcabb76..3214289df5a7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -86,21 +86,8 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
86 return &(kretprobe_table_locks[hash].lock); 86 return &(kretprobe_table_locks[hash].lock);
87} 87}
88 88
89/* 89/* Blacklist -- list of struct kprobe_blacklist_entry */
90 * Normally, functions that we'd want to prohibit kprobes in, are marked 90static LIST_HEAD(kprobe_blacklist);
91 * __kprobes. But, there are cases where such functions already belong to
92 * a different section (__sched for preempt_schedule)
93 *
94 * For such cases, we now have a blacklist
95 */
96static struct kprobe_blackpoint kprobe_blacklist[] = {
97 {"preempt_schedule",},
98 {"native_get_debugreg",},
99 {"irq_entries_start",},
100 {"common_interrupt",},
101 {"mcount",}, /* mcount can be called from everywhere */
102 {NULL} /* Terminator */
103};
104 91
105#ifdef __ARCH_WANT_KPROBES_INSN_SLOT 92#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
106/* 93/*
@@ -151,13 +138,13 @@ struct kprobe_insn_cache kprobe_insn_slots = {
151 .insn_size = MAX_INSN_SIZE, 138 .insn_size = MAX_INSN_SIZE,
152 .nr_garbage = 0, 139 .nr_garbage = 0,
153}; 140};
154static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c); 141static int collect_garbage_slots(struct kprobe_insn_cache *c);
155 142
156/** 143/**
157 * __get_insn_slot() - Find a slot on an executable page for an instruction. 144 * __get_insn_slot() - Find a slot on an executable page for an instruction.
158 * We allocate an executable page if there's no room on existing ones. 145 * We allocate an executable page if there's no room on existing ones.
159 */ 146 */
160kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) 147kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
161{ 148{
162 struct kprobe_insn_page *kip; 149 struct kprobe_insn_page *kip;
163 kprobe_opcode_t *slot = NULL; 150 kprobe_opcode_t *slot = NULL;
@@ -214,7 +201,7 @@ out:
214} 201}
215 202
216/* Return 1 if all garbages are collected, otherwise 0. */ 203/* Return 1 if all garbages are collected, otherwise 0. */
217static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) 204static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
218{ 205{
219 kip->slot_used[idx] = SLOT_CLEAN; 206 kip->slot_used[idx] = SLOT_CLEAN;
220 kip->nused--; 207 kip->nused--;
@@ -235,7 +222,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
235 return 0; 222 return 0;
236} 223}
237 224
238static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c) 225static int collect_garbage_slots(struct kprobe_insn_cache *c)
239{ 226{
240 struct kprobe_insn_page *kip, *next; 227 struct kprobe_insn_page *kip, *next;
241 228
@@ -257,8 +244,8 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
257 return 0; 244 return 0;
258} 245}
259 246
260void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, 247void __free_insn_slot(struct kprobe_insn_cache *c,
261 kprobe_opcode_t *slot, int dirty) 248 kprobe_opcode_t *slot, int dirty)
262{ 249{
263 struct kprobe_insn_page *kip; 250 struct kprobe_insn_page *kip;
264 251
@@ -314,7 +301,7 @@ static inline void reset_kprobe_instance(void)
314 * OR 301 * OR
315 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 302 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
316 */ 303 */
317struct kprobe __kprobes *get_kprobe(void *addr) 304struct kprobe *get_kprobe(void *addr)
318{ 305{
319 struct hlist_head *head; 306 struct hlist_head *head;
320 struct kprobe *p; 307 struct kprobe *p;
@@ -327,8 +314,9 @@ struct kprobe __kprobes *get_kprobe(void *addr)
327 314
328 return NULL; 315 return NULL;
329} 316}
317NOKPROBE_SYMBOL(get_kprobe);
330 318
331static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 319static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
332 320
333/* Return true if the kprobe is an aggregator */ 321/* Return true if the kprobe is an aggregator */
334static inline int kprobe_aggrprobe(struct kprobe *p) 322static inline int kprobe_aggrprobe(struct kprobe *p)
@@ -360,7 +348,7 @@ static bool kprobes_allow_optimization;
360 * Call all pre_handler on the list, but ignores its return value. 348 * Call all pre_handler on the list, but ignores its return value.
361 * This must be called from arch-dep optimized caller. 349 * This must be called from arch-dep optimized caller.
362 */ 350 */
363void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 351void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
364{ 352{
365 struct kprobe *kp; 353 struct kprobe *kp;
366 354
@@ -372,9 +360,10 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
372 reset_kprobe_instance(); 360 reset_kprobe_instance();
373 } 361 }
374} 362}
363NOKPROBE_SYMBOL(opt_pre_handler);
375 364
376/* Free optimized instructions and optimized_kprobe */ 365/* Free optimized instructions and optimized_kprobe */
377static __kprobes void free_aggr_kprobe(struct kprobe *p) 366static void free_aggr_kprobe(struct kprobe *p)
378{ 367{
379 struct optimized_kprobe *op; 368 struct optimized_kprobe *op;
380 369
@@ -412,7 +401,7 @@ static inline int kprobe_disarmed(struct kprobe *p)
412} 401}
413 402
414/* Return true(!0) if the probe is queued on (un)optimizing lists */ 403/* Return true(!0) if the probe is queued on (un)optimizing lists */
415static int __kprobes kprobe_queued(struct kprobe *p) 404static int kprobe_queued(struct kprobe *p)
416{ 405{
417 struct optimized_kprobe *op; 406 struct optimized_kprobe *op;
418 407
@@ -428,7 +417,7 @@ static int __kprobes kprobe_queued(struct kprobe *p)
428 * Return an optimized kprobe whose optimizing code replaces 417 * Return an optimized kprobe whose optimizing code replaces
429 * instructions including addr (exclude breakpoint). 418 * instructions including addr (exclude breakpoint).
430 */ 419 */
431static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) 420static struct kprobe *get_optimized_kprobe(unsigned long addr)
432{ 421{
433 int i; 422 int i;
434 struct kprobe *p = NULL; 423 struct kprobe *p = NULL;
@@ -460,7 +449,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
460 * Optimize (replace a breakpoint with a jump) kprobes listed on 449 * Optimize (replace a breakpoint with a jump) kprobes listed on
461 * optimizing_list. 450 * optimizing_list.
462 */ 451 */
463static __kprobes void do_optimize_kprobes(void) 452static void do_optimize_kprobes(void)
464{ 453{
465 /* Optimization never be done when disarmed */ 454 /* Optimization never be done when disarmed */
466 if (kprobes_all_disarmed || !kprobes_allow_optimization || 455 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
@@ -488,7 +477,7 @@ static __kprobes void do_optimize_kprobes(void)
488 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 477 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
489 * if need) kprobes listed on unoptimizing_list. 478 * if need) kprobes listed on unoptimizing_list.
490 */ 479 */
491static __kprobes void do_unoptimize_kprobes(void) 480static void do_unoptimize_kprobes(void)
492{ 481{
493 struct optimized_kprobe *op, *tmp; 482 struct optimized_kprobe *op, *tmp;
494 483
@@ -520,7 +509,7 @@ static __kprobes void do_unoptimize_kprobes(void)
520} 509}
521 510
522/* Reclaim all kprobes on the free_list */ 511/* Reclaim all kprobes on the free_list */
523static __kprobes void do_free_cleaned_kprobes(void) 512static void do_free_cleaned_kprobes(void)
524{ 513{
525 struct optimized_kprobe *op, *tmp; 514 struct optimized_kprobe *op, *tmp;
526 515
@@ -532,13 +521,13 @@ static __kprobes void do_free_cleaned_kprobes(void)
532} 521}
533 522
534/* Start optimizer after OPTIMIZE_DELAY passed */ 523/* Start optimizer after OPTIMIZE_DELAY passed */
535static __kprobes void kick_kprobe_optimizer(void) 524static void kick_kprobe_optimizer(void)
536{ 525{
537 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 526 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
538} 527}
539 528
540/* Kprobe jump optimizer */ 529/* Kprobe jump optimizer */
541static __kprobes void kprobe_optimizer(struct work_struct *work) 530static void kprobe_optimizer(struct work_struct *work)
542{ 531{
543 mutex_lock(&kprobe_mutex); 532 mutex_lock(&kprobe_mutex);
544 /* Lock modules while optimizing kprobes */ 533 /* Lock modules while optimizing kprobes */
@@ -574,7 +563,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
574} 563}
575 564
576/* Wait for completing optimization and unoptimization */ 565/* Wait for completing optimization and unoptimization */
577static __kprobes void wait_for_kprobe_optimizer(void) 566static void wait_for_kprobe_optimizer(void)
578{ 567{
579 mutex_lock(&kprobe_mutex); 568 mutex_lock(&kprobe_mutex);
580 569
@@ -593,7 +582,7 @@ static __kprobes void wait_for_kprobe_optimizer(void)
593} 582}
594 583
595/* Optimize kprobe if p is ready to be optimized */ 584/* Optimize kprobe if p is ready to be optimized */
596static __kprobes void optimize_kprobe(struct kprobe *p) 585static void optimize_kprobe(struct kprobe *p)
597{ 586{
598 struct optimized_kprobe *op; 587 struct optimized_kprobe *op;
599 588
@@ -627,7 +616,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
627} 616}
628 617
629/* Short cut to direct unoptimizing */ 618/* Short cut to direct unoptimizing */
630static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) 619static void force_unoptimize_kprobe(struct optimized_kprobe *op)
631{ 620{
632 get_online_cpus(); 621 get_online_cpus();
633 arch_unoptimize_kprobe(op); 622 arch_unoptimize_kprobe(op);
@@ -637,7 +626,7 @@ static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
637} 626}
638 627
639/* Unoptimize a kprobe if p is optimized */ 628/* Unoptimize a kprobe if p is optimized */
640static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force) 629static void unoptimize_kprobe(struct kprobe *p, bool force)
641{ 630{
642 struct optimized_kprobe *op; 631 struct optimized_kprobe *op;
643 632
@@ -697,7 +686,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
697} 686}
698 687
699/* Remove optimized instructions */ 688/* Remove optimized instructions */
700static void __kprobes kill_optimized_kprobe(struct kprobe *p) 689static void kill_optimized_kprobe(struct kprobe *p)
701{ 690{
702 struct optimized_kprobe *op; 691 struct optimized_kprobe *op;
703 692
@@ -723,7 +712,7 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
723} 712}
724 713
725/* Try to prepare optimized instructions */ 714/* Try to prepare optimized instructions */
726static __kprobes void prepare_optimized_kprobe(struct kprobe *p) 715static void prepare_optimized_kprobe(struct kprobe *p)
727{ 716{
728 struct optimized_kprobe *op; 717 struct optimized_kprobe *op;
729 718
@@ -732,7 +721,7 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
732} 721}
733 722
734/* Allocate new optimized_kprobe and try to prepare optimized instructions */ 723/* Allocate new optimized_kprobe and try to prepare optimized instructions */
735static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 724static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
736{ 725{
737 struct optimized_kprobe *op; 726 struct optimized_kprobe *op;
738 727
@@ -747,13 +736,13 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
747 return &op->kp; 736 return &op->kp;
748} 737}
749 738
750static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 739static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
751 740
752/* 741/*
753 * Prepare an optimized_kprobe and optimize it 742 * Prepare an optimized_kprobe and optimize it
754 * NOTE: p must be a normal registered kprobe 743 * NOTE: p must be a normal registered kprobe
755 */ 744 */
756static __kprobes void try_to_optimize_kprobe(struct kprobe *p) 745static void try_to_optimize_kprobe(struct kprobe *p)
757{ 746{
758 struct kprobe *ap; 747 struct kprobe *ap;
759 struct optimized_kprobe *op; 748 struct optimized_kprobe *op;
@@ -787,7 +776,7 @@ out:
787} 776}
788 777
789#ifdef CONFIG_SYSCTL 778#ifdef CONFIG_SYSCTL
790static void __kprobes optimize_all_kprobes(void) 779static void optimize_all_kprobes(void)
791{ 780{
792 struct hlist_head *head; 781 struct hlist_head *head;
793 struct kprobe *p; 782 struct kprobe *p;
@@ -810,7 +799,7 @@ out:
810 mutex_unlock(&kprobe_mutex); 799 mutex_unlock(&kprobe_mutex);
811} 800}
812 801
813static void __kprobes unoptimize_all_kprobes(void) 802static void unoptimize_all_kprobes(void)
814{ 803{
815 struct hlist_head *head; 804 struct hlist_head *head;
816 struct kprobe *p; 805 struct kprobe *p;
@@ -861,7 +850,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
861#endif /* CONFIG_SYSCTL */ 850#endif /* CONFIG_SYSCTL */
862 851
863/* Put a breakpoint for a probe. Must be called with text_mutex locked */ 852/* Put a breakpoint for a probe. Must be called with text_mutex locked */
864static void __kprobes __arm_kprobe(struct kprobe *p) 853static void __arm_kprobe(struct kprobe *p)
865{ 854{
866 struct kprobe *_p; 855 struct kprobe *_p;
867 856
@@ -876,7 +865,7 @@ static void __kprobes __arm_kprobe(struct kprobe *p)
876} 865}
877 866
878/* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 867/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
879static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) 868static void __disarm_kprobe(struct kprobe *p, bool reopt)
880{ 869{
881 struct kprobe *_p; 870 struct kprobe *_p;
882 871
@@ -911,13 +900,13 @@ static void reuse_unused_kprobe(struct kprobe *ap)
911 BUG_ON(kprobe_unused(ap)); 900 BUG_ON(kprobe_unused(ap));
912} 901}
913 902
914static __kprobes void free_aggr_kprobe(struct kprobe *p) 903static void free_aggr_kprobe(struct kprobe *p)
915{ 904{
916 arch_remove_kprobe(p); 905 arch_remove_kprobe(p);
917 kfree(p); 906 kfree(p);
918} 907}
919 908
920static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 909static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
921{ 910{
922 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 911 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
923} 912}
@@ -931,7 +920,7 @@ static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
931static int kprobe_ftrace_enabled; 920static int kprobe_ftrace_enabled;
932 921
933/* Must ensure p->addr is really on ftrace */ 922/* Must ensure p->addr is really on ftrace */
934static int __kprobes prepare_kprobe(struct kprobe *p) 923static int prepare_kprobe(struct kprobe *p)
935{ 924{
936 if (!kprobe_ftrace(p)) 925 if (!kprobe_ftrace(p))
937 return arch_prepare_kprobe(p); 926 return arch_prepare_kprobe(p);
@@ -940,7 +929,7 @@ static int __kprobes prepare_kprobe(struct kprobe *p)
940} 929}
941 930
942/* Caller must lock kprobe_mutex */ 931/* Caller must lock kprobe_mutex */
943static void __kprobes arm_kprobe_ftrace(struct kprobe *p) 932static void arm_kprobe_ftrace(struct kprobe *p)
944{ 933{
945 int ret; 934 int ret;
946 935
@@ -955,7 +944,7 @@ static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
955} 944}
956 945
957/* Caller must lock kprobe_mutex */ 946/* Caller must lock kprobe_mutex */
958static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) 947static void disarm_kprobe_ftrace(struct kprobe *p)
959{ 948{
960 int ret; 949 int ret;
961 950
@@ -975,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
975#endif 964#endif
976 965
977/* Arm a kprobe with text_mutex */ 966/* Arm a kprobe with text_mutex */
978static void __kprobes arm_kprobe(struct kprobe *kp) 967static void arm_kprobe(struct kprobe *kp)
979{ 968{
980 if (unlikely(kprobe_ftrace(kp))) { 969 if (unlikely(kprobe_ftrace(kp))) {
981 arm_kprobe_ftrace(kp); 970 arm_kprobe_ftrace(kp);
@@ -992,7 +981,7 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
992} 981}
993 982
994/* Disarm a kprobe with text_mutex */ 983/* Disarm a kprobe with text_mutex */
995static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt) 984static void disarm_kprobe(struct kprobe *kp, bool reopt)
996{ 985{
997 if (unlikely(kprobe_ftrace(kp))) { 986 if (unlikely(kprobe_ftrace(kp))) {
998 disarm_kprobe_ftrace(kp); 987 disarm_kprobe_ftrace(kp);
@@ -1008,7 +997,7 @@ static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
1008 * Aggregate handlers for multiple kprobes support - these handlers 997 * Aggregate handlers for multiple kprobes support - these handlers
1009 * take care of invoking the individual kprobe handlers on p->list 998 * take care of invoking the individual kprobe handlers on p->list
1010 */ 999 */
1011static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1000static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1012{ 1001{
1013 struct kprobe *kp; 1002 struct kprobe *kp;
1014 1003
@@ -1022,9 +1011,10 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1022 } 1011 }
1023 return 0; 1012 return 0;
1024} 1013}
1014NOKPROBE_SYMBOL(aggr_pre_handler);
1025 1015
1026static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1016static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1027 unsigned long flags) 1017 unsigned long flags)
1028{ 1018{
1029 struct kprobe *kp; 1019 struct kprobe *kp;
1030 1020
@@ -1036,9 +1026,10 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1036 } 1026 }
1037 } 1027 }
1038} 1028}
1029NOKPROBE_SYMBOL(aggr_post_handler);
1039 1030
1040static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 1031static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1041 int trapnr) 1032 int trapnr)
1042{ 1033{
1043 struct kprobe *cur = __this_cpu_read(kprobe_instance); 1034 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1044 1035
@@ -1052,8 +1043,9 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1052 } 1043 }
1053 return 0; 1044 return 0;
1054} 1045}
1046NOKPROBE_SYMBOL(aggr_fault_handler);
1055 1047
1056static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) 1048static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1057{ 1049{
1058 struct kprobe *cur = __this_cpu_read(kprobe_instance); 1050 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1059 int ret = 0; 1051 int ret = 0;
@@ -1065,9 +1057,10 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1065 reset_kprobe_instance(); 1057 reset_kprobe_instance();
1066 return ret; 1058 return ret;
1067} 1059}
1060NOKPROBE_SYMBOL(aggr_break_handler);
1068 1061
1069/* Walks the list and increments nmissed count for multiprobe case */ 1062/* Walks the list and increments nmissed count for multiprobe case */
1070void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) 1063void kprobes_inc_nmissed_count(struct kprobe *p)
1071{ 1064{
1072 struct kprobe *kp; 1065 struct kprobe *kp;
1073 if (!kprobe_aggrprobe(p)) { 1066 if (!kprobe_aggrprobe(p)) {
@@ -1078,9 +1071,10 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
1078 } 1071 }
1079 return; 1072 return;
1080} 1073}
1074NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1081 1075
1082void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, 1076void recycle_rp_inst(struct kretprobe_instance *ri,
1083 struct hlist_head *head) 1077 struct hlist_head *head)
1084{ 1078{
1085 struct kretprobe *rp = ri->rp; 1079 struct kretprobe *rp = ri->rp;
1086 1080
@@ -1095,8 +1089,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
1095 /* Unregistering */ 1089 /* Unregistering */
1096 hlist_add_head(&ri->hlist, head); 1090 hlist_add_head(&ri->hlist, head);
1097} 1091}
1092NOKPROBE_SYMBOL(recycle_rp_inst);
1098 1093
1099void __kprobes kretprobe_hash_lock(struct task_struct *tsk, 1094void kretprobe_hash_lock(struct task_struct *tsk,
1100 struct hlist_head **head, unsigned long *flags) 1095 struct hlist_head **head, unsigned long *flags)
1101__acquires(hlist_lock) 1096__acquires(hlist_lock)
1102{ 1097{
@@ -1107,17 +1102,19 @@ __acquires(hlist_lock)
1107 hlist_lock = kretprobe_table_lock_ptr(hash); 1102 hlist_lock = kretprobe_table_lock_ptr(hash);
1108 raw_spin_lock_irqsave(hlist_lock, *flags); 1103 raw_spin_lock_irqsave(hlist_lock, *flags);
1109} 1104}
1105NOKPROBE_SYMBOL(kretprobe_hash_lock);
1110 1106
1111static void __kprobes kretprobe_table_lock(unsigned long hash, 1107static void kretprobe_table_lock(unsigned long hash,
1112 unsigned long *flags) 1108 unsigned long *flags)
1113__acquires(hlist_lock) 1109__acquires(hlist_lock)
1114{ 1110{
1115 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1111 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1116 raw_spin_lock_irqsave(hlist_lock, *flags); 1112 raw_spin_lock_irqsave(hlist_lock, *flags);
1117} 1113}
1114NOKPROBE_SYMBOL(kretprobe_table_lock);
1118 1115
1119void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, 1116void kretprobe_hash_unlock(struct task_struct *tsk,
1120 unsigned long *flags) 1117 unsigned long *flags)
1121__releases(hlist_lock) 1118__releases(hlist_lock)
1122{ 1119{
1123 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1120 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
@@ -1126,14 +1123,16 @@ __releases(hlist_lock)
1126 hlist_lock = kretprobe_table_lock_ptr(hash); 1123 hlist_lock = kretprobe_table_lock_ptr(hash);
1127 raw_spin_unlock_irqrestore(hlist_lock, *flags); 1124 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1128} 1125}
1126NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1129 1127
1130static void __kprobes kretprobe_table_unlock(unsigned long hash, 1128static void kretprobe_table_unlock(unsigned long hash,
1131 unsigned long *flags) 1129 unsigned long *flags)
1132__releases(hlist_lock) 1130__releases(hlist_lock)
1133{ 1131{
1134 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1132 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1135 raw_spin_unlock_irqrestore(hlist_lock, *flags); 1133 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1136} 1134}
1135NOKPROBE_SYMBOL(kretprobe_table_unlock);
1137 1136
1138/* 1137/*
1139 * This function is called from finish_task_switch when task tk becomes dead, 1138 * This function is called from finish_task_switch when task tk becomes dead,
@@ -1141,7 +1140,7 @@ __releases(hlist_lock)
1141 * with this task. These left over instances represent probed functions 1140 * with this task. These left over instances represent probed functions
1142 * that have been called but will never return. 1141 * that have been called but will never return.
1143 */ 1142 */
1144void __kprobes kprobe_flush_task(struct task_struct *tk) 1143void kprobe_flush_task(struct task_struct *tk)
1145{ 1144{
1146 struct kretprobe_instance *ri; 1145 struct kretprobe_instance *ri;
1147 struct hlist_head *head, empty_rp; 1146 struct hlist_head *head, empty_rp;
@@ -1166,6 +1165,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1166 kfree(ri); 1165 kfree(ri);
1167 } 1166 }
1168} 1167}
1168NOKPROBE_SYMBOL(kprobe_flush_task);
1169 1169
1170static inline void free_rp_inst(struct kretprobe *rp) 1170static inline void free_rp_inst(struct kretprobe *rp)
1171{ 1171{
@@ -1178,7 +1178,7 @@ static inline void free_rp_inst(struct kretprobe *rp)
1178 } 1178 }
1179} 1179}
1180 1180
1181static void __kprobes cleanup_rp_inst(struct kretprobe *rp) 1181static void cleanup_rp_inst(struct kretprobe *rp)
1182{ 1182{
1183 unsigned long flags, hash; 1183 unsigned long flags, hash;
1184 struct kretprobe_instance *ri; 1184 struct kretprobe_instance *ri;
@@ -1197,12 +1197,13 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1197 } 1197 }
1198 free_rp_inst(rp); 1198 free_rp_inst(rp);
1199} 1199}
1200NOKPROBE_SYMBOL(cleanup_rp_inst);
1200 1201
1201/* 1202/*
1202* Add the new probe to ap->list. Fail if this is the 1203* Add the new probe to ap->list. Fail if this is the
1203* second jprobe at the address - two jprobes can't coexist 1204* second jprobe at the address - two jprobes can't coexist
1204*/ 1205*/
1205static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1206static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1206{ 1207{
1207 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 1208 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1208 1209
@@ -1226,7 +1227,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1226 * Fill in the required fields of the "manager kprobe". Replace the 1227 * Fill in the required fields of the "manager kprobe". Replace the
1227 * earlier kprobe in the hlist with the manager kprobe 1228 * earlier kprobe in the hlist with the manager kprobe
1228 */ 1229 */
1229static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1230static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1230{ 1231{
1231 /* Copy p's insn slot to ap */ 1232 /* Copy p's insn slot to ap */
1232 copy_kprobe(p, ap); 1233 copy_kprobe(p, ap);
@@ -1252,8 +1253,7 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1252 * This is the second or subsequent kprobe at the address - handle 1253 * This is the second or subsequent kprobe at the address - handle
1253 * the intricacies 1254 * the intricacies
1254 */ 1255 */
1255static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, 1256static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1256 struct kprobe *p)
1257{ 1257{
1258 int ret = 0; 1258 int ret = 0;
1259 struct kprobe *ap = orig_p; 1259 struct kprobe *ap = orig_p;
@@ -1324,25 +1324,29 @@ out:
1324 return ret; 1324 return ret;
1325} 1325}
1326 1326
1327static int __kprobes in_kprobes_functions(unsigned long addr) 1327bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1328{ 1328{
1329 struct kprobe_blackpoint *kb; 1329 /* The __kprobes marked functions and entry code must not be probed */
1330 return addr >= (unsigned long)__kprobes_text_start &&
1331 addr < (unsigned long)__kprobes_text_end;
1332}
1330 1333
1331 if (addr >= (unsigned long)__kprobes_text_start && 1334static bool within_kprobe_blacklist(unsigned long addr)
1332 addr < (unsigned long)__kprobes_text_end) 1335{
1333 return -EINVAL; 1336 struct kprobe_blacklist_entry *ent;
1337
1338 if (arch_within_kprobe_blacklist(addr))
1339 return true;
1334 /* 1340 /*
1335 * If there exists a kprobe_blacklist, verify and 1341 * If there exists a kprobe_blacklist, verify and
1336 * fail any probe registration in the prohibited area 1342 * fail any probe registration in the prohibited area
1337 */ 1343 */
1338 for (kb = kprobe_blacklist; kb->name != NULL; kb++) { 1344 list_for_each_entry(ent, &kprobe_blacklist, list) {
1339 if (kb->start_addr) { 1345 if (addr >= ent->start_addr && addr < ent->end_addr)
1340 if (addr >= kb->start_addr && 1346 return true;
1341 addr < (kb->start_addr + kb->range))
1342 return -EINVAL;
1343 }
1344 } 1347 }
1345 return 0; 1348
1349 return false;
1346} 1350}
1347 1351
1348/* 1352/*
@@ -1351,7 +1355,7 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
1351 * This returns encoded errors if it fails to look up symbol or invalid 1355 * This returns encoded errors if it fails to look up symbol or invalid
1352 * combination of parameters. 1356 * combination of parameters.
1353 */ 1357 */
1354static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) 1358static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1355{ 1359{
1356 kprobe_opcode_t *addr = p->addr; 1360 kprobe_opcode_t *addr = p->addr;
1357 1361
@@ -1374,7 +1378,7 @@ invalid:
1374} 1378}
1375 1379
1376/* Check passed kprobe is valid and return kprobe in kprobe_table. */ 1380/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1377static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) 1381static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1378{ 1382{
1379 struct kprobe *ap, *list_p; 1383 struct kprobe *ap, *list_p;
1380 1384
@@ -1406,8 +1410,8 @@ static inline int check_kprobe_rereg(struct kprobe *p)
1406 return ret; 1410 return ret;
1407} 1411}
1408 1412
1409static __kprobes int check_kprobe_address_safe(struct kprobe *p, 1413static int check_kprobe_address_safe(struct kprobe *p,
1410 struct module **probed_mod) 1414 struct module **probed_mod)
1411{ 1415{
1412 int ret = 0; 1416 int ret = 0;
1413 unsigned long ftrace_addr; 1417 unsigned long ftrace_addr;
@@ -1433,7 +1437,7 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
1433 1437
1434 /* Ensure it is not in reserved area nor out of text */ 1438 /* Ensure it is not in reserved area nor out of text */
1435 if (!kernel_text_address((unsigned long) p->addr) || 1439 if (!kernel_text_address((unsigned long) p->addr) ||
1436 in_kprobes_functions((unsigned long) p->addr) || 1440 within_kprobe_blacklist((unsigned long) p->addr) ||
1437 jump_label_text_reserved(p->addr, p->addr)) { 1441 jump_label_text_reserved(p->addr, p->addr)) {
1438 ret = -EINVAL; 1442 ret = -EINVAL;
1439 goto out; 1443 goto out;
@@ -1469,7 +1473,7 @@ out:
1469 return ret; 1473 return ret;
1470} 1474}
1471 1475
1472int __kprobes register_kprobe(struct kprobe *p) 1476int register_kprobe(struct kprobe *p)
1473{ 1477{
1474 int ret; 1478 int ret;
1475 struct kprobe *old_p; 1479 struct kprobe *old_p;
@@ -1531,7 +1535,7 @@ out:
1531EXPORT_SYMBOL_GPL(register_kprobe); 1535EXPORT_SYMBOL_GPL(register_kprobe);
1532 1536
1533/* Check if all probes on the aggrprobe are disabled */ 1537/* Check if all probes on the aggrprobe are disabled */
1534static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) 1538static int aggr_kprobe_disabled(struct kprobe *ap)
1535{ 1539{
1536 struct kprobe *kp; 1540 struct kprobe *kp;
1537 1541
@@ -1547,7 +1551,7 @@ static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1547} 1551}
1548 1552
1549/* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1553/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
1550static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) 1554static struct kprobe *__disable_kprobe(struct kprobe *p)
1551{ 1555{
1552 struct kprobe *orig_p; 1556 struct kprobe *orig_p;
1553 1557
@@ -1574,7 +1578,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1574/* 1578/*
1575 * Unregister a kprobe without a scheduler synchronization. 1579 * Unregister a kprobe without a scheduler synchronization.
1576 */ 1580 */
1577static int __kprobes __unregister_kprobe_top(struct kprobe *p) 1581static int __unregister_kprobe_top(struct kprobe *p)
1578{ 1582{
1579 struct kprobe *ap, *list_p; 1583 struct kprobe *ap, *list_p;
1580 1584
@@ -1631,7 +1635,7 @@ disarmed:
1631 return 0; 1635 return 0;
1632} 1636}
1633 1637
1634static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) 1638static void __unregister_kprobe_bottom(struct kprobe *p)
1635{ 1639{
1636 struct kprobe *ap; 1640 struct kprobe *ap;
1637 1641
@@ -1647,7 +1651,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1647 /* Otherwise, do nothing. */ 1651 /* Otherwise, do nothing. */
1648} 1652}
1649 1653
1650int __kprobes register_kprobes(struct kprobe **kps, int num) 1654int register_kprobes(struct kprobe **kps, int num)
1651{ 1655{
1652 int i, ret = 0; 1656 int i, ret = 0;
1653 1657
@@ -1665,13 +1669,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
1665} 1669}
1666EXPORT_SYMBOL_GPL(register_kprobes); 1670EXPORT_SYMBOL_GPL(register_kprobes);
1667 1671
1668void __kprobes unregister_kprobe(struct kprobe *p) 1672void unregister_kprobe(struct kprobe *p)
1669{ 1673{
1670 unregister_kprobes(&p, 1); 1674 unregister_kprobes(&p, 1);
1671} 1675}
1672EXPORT_SYMBOL_GPL(unregister_kprobe); 1676EXPORT_SYMBOL_GPL(unregister_kprobe);
1673 1677
1674void __kprobes unregister_kprobes(struct kprobe **kps, int num) 1678void unregister_kprobes(struct kprobe **kps, int num)
1675{ 1679{
1676 int i; 1680 int i;
1677 1681
@@ -1700,7 +1704,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
1700 return (unsigned long)entry; 1704 return (unsigned long)entry;
1701} 1705}
1702 1706
1703int __kprobes register_jprobes(struct jprobe **jps, int num) 1707int register_jprobes(struct jprobe **jps, int num)
1704{ 1708{
1705 struct jprobe *jp; 1709 struct jprobe *jp;
1706 int ret = 0, i; 1710 int ret = 0, i;
@@ -1731,19 +1735,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
1731} 1735}
1732EXPORT_SYMBOL_GPL(register_jprobes); 1736EXPORT_SYMBOL_GPL(register_jprobes);
1733 1737
1734int __kprobes register_jprobe(struct jprobe *jp) 1738int register_jprobe(struct jprobe *jp)
1735{ 1739{
1736 return register_jprobes(&jp, 1); 1740 return register_jprobes(&jp, 1);
1737} 1741}
1738EXPORT_SYMBOL_GPL(register_jprobe); 1742EXPORT_SYMBOL_GPL(register_jprobe);
1739 1743
1740void __kprobes unregister_jprobe(struct jprobe *jp) 1744void unregister_jprobe(struct jprobe *jp)
1741{ 1745{
1742 unregister_jprobes(&jp, 1); 1746 unregister_jprobes(&jp, 1);
1743} 1747}
1744EXPORT_SYMBOL_GPL(unregister_jprobe); 1748EXPORT_SYMBOL_GPL(unregister_jprobe);
1745 1749
1746void __kprobes unregister_jprobes(struct jprobe **jps, int num) 1750void unregister_jprobes(struct jprobe **jps, int num)
1747{ 1751{
1748 int i; 1752 int i;
1749 1753
@@ -1768,8 +1772,7 @@ EXPORT_SYMBOL_GPL(unregister_jprobes);
1768 * This kprobe pre_handler is registered with every kretprobe. When probe 1772 * This kprobe pre_handler is registered with every kretprobe. When probe
1769 * hits it will set up the return probe. 1773 * hits it will set up the return probe.
1770 */ 1774 */
1771static int __kprobes pre_handler_kretprobe(struct kprobe *p, 1775static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1772 struct pt_regs *regs)
1773{ 1776{
1774 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 1777 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1775 unsigned long hash, flags = 0; 1778 unsigned long hash, flags = 0;
@@ -1807,8 +1810,9 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1807 } 1810 }
1808 return 0; 1811 return 0;
1809} 1812}
1813NOKPROBE_SYMBOL(pre_handler_kretprobe);
1810 1814
1811int __kprobes register_kretprobe(struct kretprobe *rp) 1815int register_kretprobe(struct kretprobe *rp)
1812{ 1816{
1813 int ret = 0; 1817 int ret = 0;
1814 struct kretprobe_instance *inst; 1818 struct kretprobe_instance *inst;
@@ -1861,7 +1865,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
1861} 1865}
1862EXPORT_SYMBOL_GPL(register_kretprobe); 1866EXPORT_SYMBOL_GPL(register_kretprobe);
1863 1867
1864int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1868int register_kretprobes(struct kretprobe **rps, int num)
1865{ 1869{
1866 int ret = 0, i; 1870 int ret = 0, i;
1867 1871
@@ -1879,13 +1883,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1879} 1883}
1880EXPORT_SYMBOL_GPL(register_kretprobes); 1884EXPORT_SYMBOL_GPL(register_kretprobes);
1881 1885
1882void __kprobes unregister_kretprobe(struct kretprobe *rp) 1886void unregister_kretprobe(struct kretprobe *rp)
1883{ 1887{
1884 unregister_kretprobes(&rp, 1); 1888 unregister_kretprobes(&rp, 1);
1885} 1889}
1886EXPORT_SYMBOL_GPL(unregister_kretprobe); 1890EXPORT_SYMBOL_GPL(unregister_kretprobe);
1887 1891
1888void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1892void unregister_kretprobes(struct kretprobe **rps, int num)
1889{ 1893{
1890 int i; 1894 int i;
1891 1895
@@ -1908,38 +1912,38 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1908EXPORT_SYMBOL_GPL(unregister_kretprobes); 1912EXPORT_SYMBOL_GPL(unregister_kretprobes);
1909 1913
1910#else /* CONFIG_KRETPROBES */ 1914#else /* CONFIG_KRETPROBES */
1911int __kprobes register_kretprobe(struct kretprobe *rp) 1915int register_kretprobe(struct kretprobe *rp)
1912{ 1916{
1913 return -ENOSYS; 1917 return -ENOSYS;
1914} 1918}
1915EXPORT_SYMBOL_GPL(register_kretprobe); 1919EXPORT_SYMBOL_GPL(register_kretprobe);
1916 1920
1917int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1921int register_kretprobes(struct kretprobe **rps, int num)
1918{ 1922{
1919 return -ENOSYS; 1923 return -ENOSYS;
1920} 1924}
1921EXPORT_SYMBOL_GPL(register_kretprobes); 1925EXPORT_SYMBOL_GPL(register_kretprobes);
1922 1926
1923void __kprobes unregister_kretprobe(struct kretprobe *rp) 1927void unregister_kretprobe(struct kretprobe *rp)
1924{ 1928{
1925} 1929}
1926EXPORT_SYMBOL_GPL(unregister_kretprobe); 1930EXPORT_SYMBOL_GPL(unregister_kretprobe);
1927 1931
1928void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1932void unregister_kretprobes(struct kretprobe **rps, int num)
1929{ 1933{
1930} 1934}
1931EXPORT_SYMBOL_GPL(unregister_kretprobes); 1935EXPORT_SYMBOL_GPL(unregister_kretprobes);
1932 1936
1933static int __kprobes pre_handler_kretprobe(struct kprobe *p, 1937static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1934 struct pt_regs *regs)
1935{ 1938{
1936 return 0; 1939 return 0;
1937} 1940}
1941NOKPROBE_SYMBOL(pre_handler_kretprobe);
1938 1942
1939#endif /* CONFIG_KRETPROBES */ 1943#endif /* CONFIG_KRETPROBES */
1940 1944
1941/* Set the kprobe gone and remove its instruction buffer. */ 1945/* Set the kprobe gone and remove its instruction buffer. */
1942static void __kprobes kill_kprobe(struct kprobe *p) 1946static void kill_kprobe(struct kprobe *p)
1943{ 1947{
1944 struct kprobe *kp; 1948 struct kprobe *kp;
1945 1949
@@ -1963,7 +1967,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1963} 1967}
1964 1968
1965/* Disable one kprobe */ 1969/* Disable one kprobe */
1966int __kprobes disable_kprobe(struct kprobe *kp) 1970int disable_kprobe(struct kprobe *kp)
1967{ 1971{
1968 int ret = 0; 1972 int ret = 0;
1969 1973
@@ -1979,7 +1983,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
1979EXPORT_SYMBOL_GPL(disable_kprobe); 1983EXPORT_SYMBOL_GPL(disable_kprobe);
1980 1984
1981/* Enable one kprobe */ 1985/* Enable one kprobe */
1982int __kprobes enable_kprobe(struct kprobe *kp) 1986int enable_kprobe(struct kprobe *kp)
1983{ 1987{
1984 int ret = 0; 1988 int ret = 0;
1985 struct kprobe *p; 1989 struct kprobe *p;
@@ -2012,16 +2016,49 @@ out:
2012} 2016}
2013EXPORT_SYMBOL_GPL(enable_kprobe); 2017EXPORT_SYMBOL_GPL(enable_kprobe);
2014 2018
2015void __kprobes dump_kprobe(struct kprobe *kp) 2019void dump_kprobe(struct kprobe *kp)
2016{ 2020{
2017 printk(KERN_WARNING "Dumping kprobe:\n"); 2021 printk(KERN_WARNING "Dumping kprobe:\n");
2018 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", 2022 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2019 kp->symbol_name, kp->addr, kp->offset); 2023 kp->symbol_name, kp->addr, kp->offset);
2020} 2024}
2025NOKPROBE_SYMBOL(dump_kprobe);
2026
2027/*
2028 * Lookup and populate the kprobe_blacklist.
2029 *
2030 * Unlike the kretprobe blacklist, we'll need to determine
2031 * the range of addresses that belong to the said functions,
2032 * since a kprobe need not necessarily be at the beginning
2033 * of a function.
2034 */
2035static int __init populate_kprobe_blacklist(unsigned long *start,
2036 unsigned long *end)
2037{
2038 unsigned long *iter;
2039 struct kprobe_blacklist_entry *ent;
2040 unsigned long offset = 0, size = 0;
2041
2042 for (iter = start; iter < end; iter++) {
2043 if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
2044 pr_err("Failed to find blacklist %p\n", (void *)*iter);
2045 continue;
2046 }
2047
2048 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2049 if (!ent)
2050 return -ENOMEM;
2051 ent->start_addr = *iter;
2052 ent->end_addr = *iter + size;
2053 INIT_LIST_HEAD(&ent->list);
2054 list_add_tail(&ent->list, &kprobe_blacklist);
2055 }
2056 return 0;
2057}
2021 2058
2022/* Module notifier call back, checking kprobes on the module */ 2059/* Module notifier call back, checking kprobes on the module */
2023static int __kprobes kprobes_module_callback(struct notifier_block *nb, 2060static int kprobes_module_callback(struct notifier_block *nb,
2024 unsigned long val, void *data) 2061 unsigned long val, void *data)
2025{ 2062{
2026 struct module *mod = data; 2063 struct module *mod = data;
2027 struct hlist_head *head; 2064 struct hlist_head *head;
@@ -2062,14 +2099,13 @@ static struct notifier_block kprobe_module_nb = {
2062 .priority = 0 2099 .priority = 0
2063}; 2100};
2064 2101
2102/* Markers of _kprobe_blacklist section */
2103extern unsigned long __start_kprobe_blacklist[];
2104extern unsigned long __stop_kprobe_blacklist[];
2105
2065static int __init init_kprobes(void) 2106static int __init init_kprobes(void)
2066{ 2107{
2067 int i, err = 0; 2108 int i, err = 0;
2068 unsigned long offset = 0, size = 0;
2069 char *modname, namebuf[KSYM_NAME_LEN];
2070 const char *symbol_name;
2071 void *addr;
2072 struct kprobe_blackpoint *kb;
2073 2109
2074 /* FIXME allocate the probe table, currently defined statically */ 2110 /* FIXME allocate the probe table, currently defined statically */
2075 /* initialize all list heads */ 2111 /* initialize all list heads */
@@ -2079,26 +2115,11 @@ static int __init init_kprobes(void)
2079 raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); 2115 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2080 } 2116 }
2081 2117
2082 /* 2118 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2083 * Lookup and populate the kprobe_blacklist. 2119 __stop_kprobe_blacklist);
2084 * 2120 if (err) {
2085 * Unlike the kretprobe blacklist, we'll need to determine 2121 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2086 * the range of addresses that belong to the said functions, 2122 pr_err("Please take care of using kprobes.\n");
2087 * since a kprobe need not necessarily be at the beginning
2088 * of a function.
2089 */
2090 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
2091 kprobe_lookup_name(kb->name, addr);
2092 if (!addr)
2093 continue;
2094
2095 kb->start_addr = (unsigned long)addr;
2096 symbol_name = kallsyms_lookup(kb->start_addr,
2097 &size, &offset, &modname, namebuf);
2098 if (!symbol_name)
2099 kb->range = 0;
2100 else
2101 kb->range = size;
2102 } 2123 }
2103 2124
2104 if (kretprobe_blacklist_size) { 2125 if (kretprobe_blacklist_size) {
@@ -2138,7 +2159,7 @@ static int __init init_kprobes(void)
2138} 2159}
2139 2160
2140#ifdef CONFIG_DEBUG_FS 2161#ifdef CONFIG_DEBUG_FS
2141static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, 2162static void report_probe(struct seq_file *pi, struct kprobe *p,
2142 const char *sym, int offset, char *modname, struct kprobe *pp) 2163 const char *sym, int offset, char *modname, struct kprobe *pp)
2143{ 2164{
2144 char *kprobe_type; 2165 char *kprobe_type;
@@ -2167,12 +2188,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2167 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2188 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2168} 2189}
2169 2190
2170static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2191static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2171{ 2192{
2172 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2193 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2173} 2194}
2174 2195
2175static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2196static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2176{ 2197{
2177 (*pos)++; 2198 (*pos)++;
2178 if (*pos >= KPROBE_TABLE_SIZE) 2199 if (*pos >= KPROBE_TABLE_SIZE)
@@ -2180,12 +2201,12 @@ static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2180 return pos; 2201 return pos;
2181} 2202}
2182 2203
2183static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) 2204static void kprobe_seq_stop(struct seq_file *f, void *v)
2184{ 2205{
2185 /* Nothing to do */ 2206 /* Nothing to do */
2186} 2207}
2187 2208
2188static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 2209static int show_kprobe_addr(struct seq_file *pi, void *v)
2189{ 2210{
2190 struct hlist_head *head; 2211 struct hlist_head *head;
2191 struct kprobe *p, *kp; 2212 struct kprobe *p, *kp;
@@ -2216,7 +2237,7 @@ static const struct seq_operations kprobes_seq_ops = {
2216 .show = show_kprobe_addr 2237 .show = show_kprobe_addr
2217}; 2238};
2218 2239
2219static int __kprobes kprobes_open(struct inode *inode, struct file *filp) 2240static int kprobes_open(struct inode *inode, struct file *filp)
2220{ 2241{
2221 return seq_open(filp, &kprobes_seq_ops); 2242 return seq_open(filp, &kprobes_seq_ops);
2222} 2243}
@@ -2228,7 +2249,47 @@ static const struct file_operations debugfs_kprobes_operations = {
2228 .release = seq_release, 2249 .release = seq_release,
2229}; 2250};
2230 2251
2231static void __kprobes arm_all_kprobes(void) 2252/* kprobes/blacklist -- shows which functions can not be probed */
2253static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2254{
2255 return seq_list_start(&kprobe_blacklist, *pos);
2256}
2257
2258static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2259{
2260 return seq_list_next(v, &kprobe_blacklist, pos);
2261}
2262
2263static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2264{
2265 struct kprobe_blacklist_entry *ent =
2266 list_entry(v, struct kprobe_blacklist_entry, list);
2267
2268 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2269 (void *)ent->end_addr, (void *)ent->start_addr);
2270 return 0;
2271}
2272
2273static const struct seq_operations kprobe_blacklist_seq_ops = {
2274 .start = kprobe_blacklist_seq_start,
2275 .next = kprobe_blacklist_seq_next,
2276 .stop = kprobe_seq_stop, /* Reuse void function */
2277 .show = kprobe_blacklist_seq_show,
2278};
2279
2280static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2281{
2282 return seq_open(filp, &kprobe_blacklist_seq_ops);
2283}
2284
2285static const struct file_operations debugfs_kprobe_blacklist_ops = {
2286 .open = kprobe_blacklist_open,
2287 .read = seq_read,
2288 .llseek = seq_lseek,
2289 .release = seq_release,
2290};
2291
2292static void arm_all_kprobes(void)
2232{ 2293{
2233 struct hlist_head *head; 2294 struct hlist_head *head;
2234 struct kprobe *p; 2295 struct kprobe *p;
@@ -2256,7 +2317,7 @@ already_enabled:
2256 return; 2317 return;
2257} 2318}
2258 2319
2259static void __kprobes disarm_all_kprobes(void) 2320static void disarm_all_kprobes(void)
2260{ 2321{
2261 struct hlist_head *head; 2322 struct hlist_head *head;
2262 struct kprobe *p; 2323 struct kprobe *p;
@@ -2340,7 +2401,7 @@ static const struct file_operations fops_kp = {
2340 .llseek = default_llseek, 2401 .llseek = default_llseek,
2341}; 2402};
2342 2403
2343static int __kprobes debugfs_kprobe_init(void) 2404static int __init debugfs_kprobe_init(void)
2344{ 2405{
2345 struct dentry *dir, *file; 2406 struct dentry *dir, *file;
2346 unsigned int value = 1; 2407 unsigned int value = 1;
@@ -2351,19 +2412,24 @@ static int __kprobes debugfs_kprobe_init(void)
2351 2412
2352 file = debugfs_create_file("list", 0444, dir, NULL, 2413 file = debugfs_create_file("list", 0444, dir, NULL,
2353 &debugfs_kprobes_operations); 2414 &debugfs_kprobes_operations);
2354 if (!file) { 2415 if (!file)
2355 debugfs_remove(dir); 2416 goto error;
2356 return -ENOMEM;
2357 }
2358 2417
2359 file = debugfs_create_file("enabled", 0600, dir, 2418 file = debugfs_create_file("enabled", 0600, dir,
2360 &value, &fops_kp); 2419 &value, &fops_kp);
2361 if (!file) { 2420 if (!file)
2362 debugfs_remove(dir); 2421 goto error;
2363 return -ENOMEM; 2422
2364 } 2423 file = debugfs_create_file("blacklist", 0444, dir, NULL,
2424 &debugfs_kprobe_blacklist_ops);
2425 if (!file)
2426 goto error;
2365 2427
2366 return 0; 2428 return 0;
2429
2430error:
2431 debugfs_remove(dir);
2432 return -ENOMEM;
2367} 2433}
2368 2434
2369late_initcall(debugfs_kprobe_init); 2435late_initcall(debugfs_kprobe_init);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index db4c8b08a50c..4803da6eab62 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -71,9 +71,9 @@ static int notifier_chain_unregister(struct notifier_block **nl,
71 * @returns: notifier_call_chain returns the value returned by the 71 * @returns: notifier_call_chain returns the value returned by the
72 * last notifier function called. 72 * last notifier function called.
73 */ 73 */
74static int __kprobes notifier_call_chain(struct notifier_block **nl, 74static int notifier_call_chain(struct notifier_block **nl,
75 unsigned long val, void *v, 75 unsigned long val, void *v,
76 int nr_to_call, int *nr_calls) 76 int nr_to_call, int *nr_calls)
77{ 77{
78 int ret = NOTIFY_DONE; 78 int ret = NOTIFY_DONE;
79 struct notifier_block *nb, *next_nb; 79 struct notifier_block *nb, *next_nb;
@@ -102,6 +102,7 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
102 } 102 }
103 return ret; 103 return ret;
104} 104}
105NOKPROBE_SYMBOL(notifier_call_chain);
105 106
106/* 107/*
107 * Atomic notifier chain routines. Registration and unregistration 108 * Atomic notifier chain routines. Registration and unregistration
@@ -172,9 +173,9 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
172 * Otherwise the return value is the return value 173 * Otherwise the return value is the return value
173 * of the last notifier function called. 174 * of the last notifier function called.
174 */ 175 */
175int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, 176int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
176 unsigned long val, void *v, 177 unsigned long val, void *v,
177 int nr_to_call, int *nr_calls) 178 int nr_to_call, int *nr_calls)
178{ 179{
179 int ret; 180 int ret;
180 181
@@ -184,13 +185,15 @@ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
184 return ret; 185 return ret;
185} 186}
186EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); 187EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
188NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
187 189
188int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, 190int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
189 unsigned long val, void *v) 191 unsigned long val, void *v)
190{ 192{
191 return __atomic_notifier_call_chain(nh, val, v, -1, NULL); 193 return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
192} 194}
193EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); 195EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
196NOKPROBE_SYMBOL(atomic_notifier_call_chain);
194 197
195/* 198/*
196 * Blocking notifier chain routines. All access to the chain is 199 * Blocking notifier chain routines. All access to the chain is
@@ -527,7 +530,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
527 530
528static ATOMIC_NOTIFIER_HEAD(die_chain); 531static ATOMIC_NOTIFIER_HEAD(die_chain);
529 532
530int notrace __kprobes notify_die(enum die_val val, const char *str, 533int notrace notify_die(enum die_val val, const char *str,
531 struct pt_regs *regs, long err, int trap, int sig) 534 struct pt_regs *regs, long err, int trap, int sig)
532{ 535{
533 struct die_args args = { 536 struct die_args args = {
@@ -540,6 +543,7 @@ int notrace __kprobes notify_die(enum die_val val, const char *str,
540 }; 543 };
541 return atomic_notifier_call_chain(&die_chain, val, &args); 544 return atomic_notifier_call_chain(&die_chain, val, &args);
542} 545}
546NOKPROBE_SYMBOL(notify_die);
543 547
544int register_die_notifier(struct notifier_block *nb) 548int register_die_notifier(struct notifier_block *nb)
545{ 549{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..00781cc38047 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2480,7 +2480,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
2480#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2480#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2481 defined(CONFIG_PREEMPT_TRACER)) 2481 defined(CONFIG_PREEMPT_TRACER))
2482 2482
2483void __kprobes preempt_count_add(int val) 2483void preempt_count_add(int val)
2484{ 2484{
2485#ifdef CONFIG_DEBUG_PREEMPT 2485#ifdef CONFIG_DEBUG_PREEMPT
2486 /* 2486 /*
@@ -2506,8 +2506,9 @@ void __kprobes preempt_count_add(int val)
2506 } 2506 }
2507} 2507}
2508EXPORT_SYMBOL(preempt_count_add); 2508EXPORT_SYMBOL(preempt_count_add);
2509NOKPROBE_SYMBOL(preempt_count_add);
2509 2510
2510void __kprobes preempt_count_sub(int val) 2511void preempt_count_sub(int val)
2511{ 2512{
2512#ifdef CONFIG_DEBUG_PREEMPT 2513#ifdef CONFIG_DEBUG_PREEMPT
2513 /* 2514 /*
@@ -2528,6 +2529,7 @@ void __kprobes preempt_count_sub(int val)
2528 __preempt_count_sub(val); 2529 __preempt_count_sub(val);
2529} 2530}
2530EXPORT_SYMBOL(preempt_count_sub); 2531EXPORT_SYMBOL(preempt_count_sub);
2532NOKPROBE_SYMBOL(preempt_count_sub);
2531 2533
2532#endif 2534#endif
2533 2535
@@ -2804,6 +2806,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
2804 barrier(); 2806 barrier();
2805 } while (need_resched()); 2807 } while (need_resched());
2806} 2808}
2809NOKPROBE_SYMBOL(preempt_schedule);
2807EXPORT_SYMBOL(preempt_schedule); 2810EXPORT_SYMBOL(preempt_schedule);
2808#endif /* CONFIG_PREEMPT */ 2811#endif /* CONFIG_PREEMPT */
2809 2812
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index c894614de14d..5d12bb407b44 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -248,8 +248,8 @@ void perf_trace_del(struct perf_event *p_event, int flags)
248 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); 248 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
249} 249}
250 250
251__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, 251void *perf_trace_buf_prepare(int size, unsigned short type,
252 struct pt_regs *regs, int *rctxp) 252 struct pt_regs *regs, int *rctxp)
253{ 253{
254 struct trace_entry *entry; 254 struct trace_entry *entry;
255 unsigned long flags; 255 unsigned long flags;
@@ -281,6 +281,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
281 return raw_data; 281 return raw_data;
282} 282}
283EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); 283EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
284NOKPROBE_SYMBOL(perf_trace_buf_prepare);
284 285
285#ifdef CONFIG_FUNCTION_TRACER 286#ifdef CONFIG_FUNCTION_TRACER
286static void 287static void
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 903ae28962be..242e4ec97d94 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -40,27 +40,27 @@ struct trace_kprobe {
40 (sizeof(struct probe_arg) * (n))) 40 (sizeof(struct probe_arg) * (n)))
41 41
42 42
43static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk) 43static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
44{ 44{
45 return tk->rp.handler != NULL; 45 return tk->rp.handler != NULL;
46} 46}
47 47
48static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk) 48static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
49{ 49{
50 return tk->symbol ? tk->symbol : "unknown"; 50 return tk->symbol ? tk->symbol : "unknown";
51} 51}
52 52
53static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk) 53static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
54{ 54{
55 return tk->rp.kp.offset; 55 return tk->rp.kp.offset;
56} 56}
57 57
58static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk) 58static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
59{ 59{
60 return !!(kprobe_gone(&tk->rp.kp)); 60 return !!(kprobe_gone(&tk->rp.kp));
61} 61}
62 62
63static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, 63static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
64 struct module *mod) 64 struct module *mod)
65{ 65{
66 int len = strlen(mod->name); 66 int len = strlen(mod->name);
@@ -68,7 +68,7 @@ static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
68 return strncmp(mod->name, name, len) == 0 && name[len] == ':'; 68 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
69} 69}
70 70
71static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk) 71static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
72{ 72{
73 return !!strchr(trace_kprobe_symbol(tk), ':'); 73 return !!strchr(trace_kprobe_symbol(tk), ':');
74} 74}
@@ -132,19 +132,21 @@ struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
132 * Kprobes-specific fetch functions 132 * Kprobes-specific fetch functions
133 */ 133 */
134#define DEFINE_FETCH_stack(type) \ 134#define DEFINE_FETCH_stack(type) \
135static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ 135static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
136 void *offset, void *dest) \ 136 void *offset, void *dest) \
137{ \ 137{ \
138 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ 138 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
139 (unsigned int)((unsigned long)offset)); \ 139 (unsigned int)((unsigned long)offset)); \
140} 140} \
141NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
142
141DEFINE_BASIC_FETCH_FUNCS(stack) 143DEFINE_BASIC_FETCH_FUNCS(stack)
142/* No string on the stack entry */ 144/* No string on the stack entry */
143#define fetch_stack_string NULL 145#define fetch_stack_string NULL
144#define fetch_stack_string_size NULL 146#define fetch_stack_string_size NULL
145 147
146#define DEFINE_FETCH_memory(type) \ 148#define DEFINE_FETCH_memory(type) \
147static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ 149static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
148 void *addr, void *dest) \ 150 void *addr, void *dest) \
149{ \ 151{ \
150 type retval; \ 152 type retval; \
@@ -152,14 +154,16 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
152 *(type *)dest = 0; \ 154 *(type *)dest = 0; \
153 else \ 155 else \
154 *(type *)dest = retval; \ 156 *(type *)dest = retval; \
155} 157} \
158NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
159
156DEFINE_BASIC_FETCH_FUNCS(memory) 160DEFINE_BASIC_FETCH_FUNCS(memory)
157/* 161/*
158 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
159 * length and relative data location. 163 * length and relative data location.
160 */ 164 */
161static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 165static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
162 void *addr, void *dest) 166 void *addr, void *dest)
163{ 167{
164 long ret; 168 long ret;
165 int maxlen = get_rloc_len(*(u32 *)dest); 169 int maxlen = get_rloc_len(*(u32 *)dest);
@@ -193,10 +197,11 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
193 get_rloc_offs(*(u32 *)dest)); 197 get_rloc_offs(*(u32 *)dest));
194 } 198 }
195} 199}
200NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
196 201
197/* Return the length of string -- including null terminal byte */ 202/* Return the length of string -- including null terminal byte */
198static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 203static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
199 void *addr, void *dest) 204 void *addr, void *dest)
200{ 205{
201 mm_segment_t old_fs; 206 mm_segment_t old_fs;
202 int ret, len = 0; 207 int ret, len = 0;
@@ -219,17 +224,19 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
219 else 224 else
220 *(u32 *)dest = len; 225 *(u32 *)dest = len;
221} 226}
227NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
222 228
223#define DEFINE_FETCH_symbol(type) \ 229#define DEFINE_FETCH_symbol(type) \
224__kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \ 230void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
225 void *data, void *dest) \
226{ \ 231{ \
227 struct symbol_cache *sc = data; \ 232 struct symbol_cache *sc = data; \
228 if (sc->addr) \ 233 if (sc->addr) \
229 fetch_memory_##type(regs, (void *)sc->addr, dest); \ 234 fetch_memory_##type(regs, (void *)sc->addr, dest); \
230 else \ 235 else \
231 *(type *)dest = 0; \ 236 *(type *)dest = 0; \
232} 237} \
238NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
239
233DEFINE_BASIC_FETCH_FUNCS(symbol) 240DEFINE_BASIC_FETCH_FUNCS(symbol)
234DEFINE_FETCH_symbol(string) 241DEFINE_FETCH_symbol(string)
235DEFINE_FETCH_symbol(string_size) 242DEFINE_FETCH_symbol(string_size)
@@ -907,7 +914,7 @@ static const struct file_operations kprobe_profile_ops = {
907}; 914};
908 915
909/* Kprobe handler */ 916/* Kprobe handler */
910static __kprobes void 917static nokprobe_inline void
911__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, 918__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
912 struct ftrace_event_file *ftrace_file) 919 struct ftrace_event_file *ftrace_file)
913{ 920{
@@ -943,7 +950,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
943 entry, irq_flags, pc, regs); 950 entry, irq_flags, pc, regs);
944} 951}
945 952
946static __kprobes void 953static void
947kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) 954kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
948{ 955{
949 struct event_file_link *link; 956 struct event_file_link *link;
@@ -951,9 +958,10 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
951 list_for_each_entry_rcu(link, &tk->tp.files, list) 958 list_for_each_entry_rcu(link, &tk->tp.files, list)
952 __kprobe_trace_func(tk, regs, link->file); 959 __kprobe_trace_func(tk, regs, link->file);
953} 960}
961NOKPROBE_SYMBOL(kprobe_trace_func);
954 962
955/* Kretprobe handler */ 963/* Kretprobe handler */
956static __kprobes void 964static nokprobe_inline void
957__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 965__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
958 struct pt_regs *regs, 966 struct pt_regs *regs,
959 struct ftrace_event_file *ftrace_file) 967 struct ftrace_event_file *ftrace_file)
@@ -991,7 +999,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
991 entry, irq_flags, pc, regs); 999 entry, irq_flags, pc, regs);
992} 1000}
993 1001
994static __kprobes void 1002static void
995kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1003kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
996 struct pt_regs *regs) 1004 struct pt_regs *regs)
997{ 1005{
@@ -1000,6 +1008,7 @@ kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1000 list_for_each_entry_rcu(link, &tk->tp.files, list) 1008 list_for_each_entry_rcu(link, &tk->tp.files, list)
1001 __kretprobe_trace_func(tk, ri, regs, link->file); 1009 __kretprobe_trace_func(tk, ri, regs, link->file);
1002} 1010}
1011NOKPROBE_SYMBOL(kretprobe_trace_func);
1003 1012
1004/* Event entry printers */ 1013/* Event entry printers */
1005static enum print_line_t 1014static enum print_line_t
@@ -1131,7 +1140,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1131#ifdef CONFIG_PERF_EVENTS 1140#ifdef CONFIG_PERF_EVENTS
1132 1141
1133/* Kprobe profile handler */ 1142/* Kprobe profile handler */
1134static __kprobes void 1143static void
1135kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) 1144kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1136{ 1145{
1137 struct ftrace_event_call *call = &tk->tp.call; 1146 struct ftrace_event_call *call = &tk->tp.call;
@@ -1158,9 +1167,10 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1158 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1167 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1159 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); 1168 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1160} 1169}
1170NOKPROBE_SYMBOL(kprobe_perf_func);
1161 1171
1162/* Kretprobe profile handler */ 1172/* Kretprobe profile handler */
1163static __kprobes void 1173static void
1164kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1174kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1165 struct pt_regs *regs) 1175 struct pt_regs *regs)
1166{ 1176{
@@ -1188,6 +1198,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1188 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1198 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1189 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); 1199 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1190} 1200}
1201NOKPROBE_SYMBOL(kretprobe_perf_func);
1191#endif /* CONFIG_PERF_EVENTS */ 1202#endif /* CONFIG_PERF_EVENTS */
1192 1203
1193/* 1204/*
@@ -1196,9 +1207,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1196 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe 1207 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1197 * lockless, but we can't race with this __init function. 1208 * lockless, but we can't race with this __init function.
1198 */ 1209 */
1199static __kprobes 1210static int kprobe_register(struct ftrace_event_call *event,
1200int kprobe_register(struct ftrace_event_call *event, 1211 enum trace_reg type, void *data)
1201 enum trace_reg type, void *data)
1202{ 1212{
1203 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; 1213 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1204 struct ftrace_event_file *file = data; 1214 struct ftrace_event_file *file = data;
@@ -1224,8 +1234,7 @@ int kprobe_register(struct ftrace_event_call *event,
1224 return 0; 1234 return 0;
1225} 1235}
1226 1236
1227static __kprobes 1237static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1228int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1229{ 1238{
1230 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); 1239 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1231 1240
@@ -1239,9 +1248,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1239#endif 1248#endif
1240 return 0; /* We don't tweek kernel, so just return 0 */ 1249 return 0; /* We don't tweek kernel, so just return 0 */
1241} 1250}
1251NOKPROBE_SYMBOL(kprobe_dispatcher);
1242 1252
1243static __kprobes 1253static int
1244int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) 1254kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1245{ 1255{
1246 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); 1256 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1247 1257
@@ -1255,6 +1265,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1255#endif 1265#endif
1256 return 0; /* We don't tweek kernel, so just return 0 */ 1266 return 0; /* We don't tweek kernel, so just return 0 */
1257} 1267}
1268NOKPROBE_SYMBOL(kretprobe_dispatcher);
1258 1269
1259static struct trace_event_functions kretprobe_funcs = { 1270static struct trace_event_functions kretprobe_funcs = {
1260 .trace = print_kretprobe_event 1271 .trace = print_kretprobe_event
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8364a421b4df..d4b9fc22cd27 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -37,13 +37,13 @@ const char *reserved_field_names[] = {
37 37
38/* Printing in basic type function template */ 38/* Printing in basic type function template */
39#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \ 39#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \
40__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ 40int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
41 const char *name, \ 41 void *data, void *ent) \
42 void *data, void *ent) \
43{ \ 42{ \
44 return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ 43 return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \
45} \ 44} \
46const char PRINT_TYPE_FMT_NAME(type)[] = fmt; 45const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \
46NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type));
47 47
48DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x") 48DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x")
49DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x") 49DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x")
@@ -55,9 +55,8 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%d")
55DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld") 55DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld")
56 56
57/* Print type function for string type */ 57/* Print type function for string type */
58__kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, 58int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name,
59 const char *name, 59 void *data, void *ent)
60 void *data, void *ent)
61{ 60{
62 int len = *(u32 *)data >> 16; 61 int len = *(u32 *)data >> 16;
63 62
@@ -67,6 +66,7 @@ __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
67 return trace_seq_printf(s, " %s=\"%s\"", name, 66 return trace_seq_printf(s, " %s=\"%s\"", name,
68 (const char *)get_loc_data(data, ent)); 67 (const char *)get_loc_data(data, ent));
69} 68}
69NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string));
70 70
71const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; 71const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
72 72
@@ -81,23 +81,24 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
81 81
82/* Data fetch function templates */ 82/* Data fetch function templates */
83#define DEFINE_FETCH_reg(type) \ 83#define DEFINE_FETCH_reg(type) \
84__kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ 84void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \
85 void *offset, void *dest) \
86{ \ 85{ \
87 *(type *)dest = (type)regs_get_register(regs, \ 86 *(type *)dest = (type)regs_get_register(regs, \
88 (unsigned int)((unsigned long)offset)); \ 87 (unsigned int)((unsigned long)offset)); \
89} 88} \
89NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type));
90DEFINE_BASIC_FETCH_FUNCS(reg) 90DEFINE_BASIC_FETCH_FUNCS(reg)
91/* No string on the register */ 91/* No string on the register */
92#define fetch_reg_string NULL 92#define fetch_reg_string NULL
93#define fetch_reg_string_size NULL 93#define fetch_reg_string_size NULL
94 94
95#define DEFINE_FETCH_retval(type) \ 95#define DEFINE_FETCH_retval(type) \
96__kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ 96void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \
97 void *dummy, void *dest) \ 97 void *dummy, void *dest) \
98{ \ 98{ \
99 *(type *)dest = (type)regs_return_value(regs); \ 99 *(type *)dest = (type)regs_return_value(regs); \
100} 100} \
101NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type));
101DEFINE_BASIC_FETCH_FUNCS(retval) 102DEFINE_BASIC_FETCH_FUNCS(retval)
102/* No string on the retval */ 103/* No string on the retval */
103#define fetch_retval_string NULL 104#define fetch_retval_string NULL
@@ -112,8 +113,8 @@ struct deref_fetch_param {
112}; 113};
113 114
114#define DEFINE_FETCH_deref(type) \ 115#define DEFINE_FETCH_deref(type) \
115__kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ 116void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
116 void *data, void *dest) \ 117 void *data, void *dest) \
117{ \ 118{ \
118 struct deref_fetch_param *dprm = data; \ 119 struct deref_fetch_param *dprm = data; \
119 unsigned long addr; \ 120 unsigned long addr; \
@@ -123,12 +124,13 @@ __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
123 dprm->fetch(regs, (void *)addr, dest); \ 124 dprm->fetch(regs, (void *)addr, dest); \
124 } else \ 125 } else \
125 *(type *)dest = 0; \ 126 *(type *)dest = 0; \
126} 127} \
128NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type));
127DEFINE_BASIC_FETCH_FUNCS(deref) 129DEFINE_BASIC_FETCH_FUNCS(deref)
128DEFINE_FETCH_deref(string) 130DEFINE_FETCH_deref(string)
129 131
130__kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, 132void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
131 void *data, void *dest) 133 void *data, void *dest)
132{ 134{
133 struct deref_fetch_param *dprm = data; 135 struct deref_fetch_param *dprm = data;
134 unsigned long addr; 136 unsigned long addr;
@@ -140,16 +142,18 @@ __kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
140 } else 142 } else
141 *(string_size *)dest = 0; 143 *(string_size *)dest = 0;
142} 144}
145NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size));
143 146
144static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) 147static void update_deref_fetch_param(struct deref_fetch_param *data)
145{ 148{
146 if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) 149 if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
147 update_deref_fetch_param(data->orig.data); 150 update_deref_fetch_param(data->orig.data);
148 else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) 151 else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
149 update_symbol_cache(data->orig.data); 152 update_symbol_cache(data->orig.data);
150} 153}
154NOKPROBE_SYMBOL(update_deref_fetch_param);
151 155
152static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) 156static void free_deref_fetch_param(struct deref_fetch_param *data)
153{ 157{
154 if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) 158 if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
155 free_deref_fetch_param(data->orig.data); 159 free_deref_fetch_param(data->orig.data);
@@ -157,6 +161,7 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
157 free_symbol_cache(data->orig.data); 161 free_symbol_cache(data->orig.data);
158 kfree(data); 162 kfree(data);
159} 163}
164NOKPROBE_SYMBOL(free_deref_fetch_param);
160 165
161/* Bitfield fetch function */ 166/* Bitfield fetch function */
162struct bitfield_fetch_param { 167struct bitfield_fetch_param {
@@ -166,8 +171,8 @@ struct bitfield_fetch_param {
166}; 171};
167 172
168#define DEFINE_FETCH_bitfield(type) \ 173#define DEFINE_FETCH_bitfield(type) \
169__kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ 174void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
170 void *data, void *dest) \ 175 void *data, void *dest) \
171{ \ 176{ \
172 struct bitfield_fetch_param *bprm = data; \ 177 struct bitfield_fetch_param *bprm = data; \
173 type buf = 0; \ 178 type buf = 0; \
@@ -177,13 +182,13 @@ __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
177 buf >>= bprm->low_shift; \ 182 buf >>= bprm->low_shift; \
178 } \ 183 } \
179 *(type *)dest = buf; \ 184 *(type *)dest = buf; \
180} 185} \
181 186NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type));
182DEFINE_BASIC_FETCH_FUNCS(bitfield) 187DEFINE_BASIC_FETCH_FUNCS(bitfield)
183#define fetch_bitfield_string NULL 188#define fetch_bitfield_string NULL
184#define fetch_bitfield_string_size NULL 189#define fetch_bitfield_string_size NULL
185 190
186static __kprobes void 191static void
187update_bitfield_fetch_param(struct bitfield_fetch_param *data) 192update_bitfield_fetch_param(struct bitfield_fetch_param *data)
188{ 193{
189 /* 194 /*
@@ -196,7 +201,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param *data)
196 update_symbol_cache(data->orig.data); 201 update_symbol_cache(data->orig.data);
197} 202}
198 203
199static __kprobes void 204static void
200free_bitfield_fetch_param(struct bitfield_fetch_param *data) 205free_bitfield_fetch_param(struct bitfield_fetch_param *data)
201{ 206{
202 /* 207 /*
@@ -255,17 +260,17 @@ fail:
255} 260}
256 261
257/* Special function : only accept unsigned long */ 262/* Special function : only accept unsigned long */
258static __kprobes void fetch_kernel_stack_address(struct pt_regs *regs, 263static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest)
259 void *dummy, void *dest)
260{ 264{
261 *(unsigned long *)dest = kernel_stack_pointer(regs); 265 *(unsigned long *)dest = kernel_stack_pointer(regs);
262} 266}
267NOKPROBE_SYMBOL(fetch_kernel_stack_address);
263 268
264static __kprobes void fetch_user_stack_address(struct pt_regs *regs, 269static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest)
265 void *dummy, void *dest)
266{ 270{
267 *(unsigned long *)dest = user_stack_pointer(regs); 271 *(unsigned long *)dest = user_stack_pointer(regs);
268} 272}
273NOKPROBE_SYMBOL(fetch_user_stack_address);
269 274
270static fetch_func_t get_fetch_size_function(const struct fetch_type *type, 275static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
271 fetch_func_t orig_fn, 276 fetch_func_t orig_fn,
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index fb1ab5dfbd42..4f815fbce16d 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -81,13 +81,13 @@
81 */ 81 */
82#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) 82#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
83 83
84static inline void *get_rloc_data(u32 *dl) 84static nokprobe_inline void *get_rloc_data(u32 *dl)
85{ 85{
86 return (u8 *)dl + get_rloc_offs(*dl); 86 return (u8 *)dl + get_rloc_offs(*dl);
87} 87}
88 88
89/* For data_loc conversion */ 89/* For data_loc conversion */
90static inline void *get_loc_data(u32 *dl, void *ent) 90static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
91{ 91{
92 return (u8 *)ent + get_rloc_offs(*dl); 92 return (u8 *)ent + get_rloc_offs(*dl);
93} 93}
@@ -136,9 +136,8 @@ typedef u32 string_size;
136 136
137/* Printing in basic type function template */ 137/* Printing in basic type function template */
138#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \ 138#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \
139__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ 139int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
140 const char *name, \ 140 void *data, void *ent); \
141 void *data, void *ent); \
142extern const char PRINT_TYPE_FMT_NAME(type)[] 141extern const char PRINT_TYPE_FMT_NAME(type)[]
143 142
144DECLARE_BASIC_PRINT_TYPE_FUNC(u8); 143DECLARE_BASIC_PRINT_TYPE_FUNC(u8);
@@ -303,7 +302,7 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp)
303 return !!(tp->flags & TP_FLAG_REGISTERED); 302 return !!(tp->flags & TP_FLAG_REGISTERED);
304} 303}
305 304
306static inline __kprobes void call_fetch(struct fetch_param *fprm, 305static nokprobe_inline void call_fetch(struct fetch_param *fprm,
307 struct pt_regs *regs, void *dest) 306 struct pt_regs *regs, void *dest)
308{ 307{
309 return fprm->fn(regs, fprm->data, dest); 308 return fprm->fn(regs, fprm->data, dest);
@@ -351,7 +350,7 @@ extern ssize_t traceprobe_probes_write(struct file *file,
351extern int traceprobe_command(const char *buf, int (*createfn)(int, char**)); 350extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
352 351
353/* Sum up total data length for dynamic arraies (strings) */ 352/* Sum up total data length for dynamic arraies (strings) */
354static inline __kprobes int 353static nokprobe_inline int
355__get_data_size(struct trace_probe *tp, struct pt_regs *regs) 354__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
356{ 355{
357 int i, ret = 0; 356 int i, ret = 0;
@@ -367,7 +366,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
367} 366}
368 367
369/* Store the value of each argument */ 368/* Store the value of each argument */
370static inline __kprobes void 369static nokprobe_inline void
371store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, 370store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
372 u8 *data, int maxlen) 371 u8 *data, int maxlen)
373{ 372{
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 5a7f1a6b3b8b..04fdb5de823c 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
108 * Uprobes-specific fetch functions 108 * Uprobes-specific fetch functions
109 */ 109 */
110#define DEFINE_FETCH_stack(type) \ 110#define DEFINE_FETCH_stack(type) \
111static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ 111static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
112 void *offset, void *dest) \ 112 void *offset, void *dest) \
113{ \ 113{ \
114 *(type *)dest = (type)get_user_stack_nth(regs, \ 114 *(type *)dest = (type)get_user_stack_nth(regs, \
115 ((unsigned long)offset)); \ 115 ((unsigned long)offset)); \
@@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack)
120#define fetch_stack_string_size NULL 120#define fetch_stack_string_size NULL
121 121
122#define DEFINE_FETCH_memory(type) \ 122#define DEFINE_FETCH_memory(type) \
123static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ 123static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
124 void *addr, void *dest) \ 124 void *addr, void *dest) \
125{ \ 125{ \
126 type retval; \ 126 type retval; \
127 void __user *vaddr = (void __force __user *) addr; \ 127 void __user *vaddr = (void __force __user *) addr; \
@@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory)
136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
137 * length and relative data location. 137 * length and relative data location.
138 */ 138 */
139static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 139static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
140 void *addr, void *dest) 140 void *addr, void *dest)
141{ 141{
142 long ret; 142 long ret;
143 u32 rloc = *(u32 *)dest; 143 u32 rloc = *(u32 *)dest;
@@ -158,8 +158,8 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
158 } 158 }
159} 159}
160 160
161static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 161static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
162 void *addr, void *dest) 162 void *addr, void *dest)
163{ 163{
164 int len; 164 int len;
165 void __user *vaddr = (void __force __user *) addr; 165 void __user *vaddr = (void __force __user *) addr;
@@ -184,8 +184,8 @@ static unsigned long translate_user_vaddr(void *file_offset)
184} 184}
185 185
186#define DEFINE_FETCH_file_offset(type) \ 186#define DEFINE_FETCH_file_offset(type) \
187static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\ 187static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
188 void *offset, void *dest) \ 188 void *offset, void *dest)\
189{ \ 189{ \
190 void *vaddr = (void *)translate_user_vaddr(offset); \ 190 void *vaddr = (void *)translate_user_vaddr(offset); \
191 \ 191 \