-rw-r--r--  arch/x86/include/asm/ftrace.h      |  49
-rw-r--r--  arch/x86/include/asm/kprobes.h     |   1
-rw-r--r--  arch/x86/kernel/entry_32.S         |  75
-rw-r--r--  arch/x86/kernel/entry_64.S         |  96
-rw-r--r--  arch/x86/kernel/ftrace.c           |  73
-rw-r--r--  arch/x86/kernel/kprobes.c          |  48
-rw-r--r--  include/linux/ftrace.h             | 158
-rw-r--r--  include/linux/kprobes.h            |  27
-rw-r--r--  kernel/kprobes.c                   | 250
-rw-r--r--  kernel/trace/Makefile              |   2
-rw-r--r--  kernel/trace/ftrace.c              | 322
-rw-r--r--  kernel/trace/trace.h               |   2
-rw-r--r--  kernel/trace/trace_event_perf.c    |   3
-rw-r--r--  kernel/trace/trace_events.c        |   4
-rw-r--r--  kernel/trace/trace_functions.c     |  14
-rw-r--r--  kernel/trace/trace_irqsoff.c       |   5
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  |   5
-rw-r--r--  kernel/trace/trace_selftest.c      | 277
-rw-r--r--  kernel/trace/trace_stack.c         |   4
19 files changed, 1199 insertions(+), 216 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b0767bc08740..a6cae0c1720c 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,27 +3,33 @@
 
 #ifdef __ASSEMBLY__
 
-	.macro MCOUNT_SAVE_FRAME
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	/* skip is set if the stack was already partially adjusted */
+	.macro MCOUNT_SAVE_FRAME skip=0
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(SS+8-\skip), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	/* Move RIP to its proper location */
+	movq SS+8(%rsp), %rdx
+	movq %rdx, RIP(%rsp)
 	.endm
 
-	.macro MCOUNT_RESTORE_FRAME
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	.macro MCOUNT_RESTORE_FRAME skip=0
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+	addq $(SS+8-\skip), %rsp
 	.endm
 
 #endif
@@ -32,6 +38,11 @@
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
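
Note: the RAX()/RIP()/SS offsets used in the macros above are the byte offsets of the matching struct pt_regs fields, so the frame MCOUNT_SAVE_FRAME builds is a genuine pt_regs that can be handed to a C callback. A minimal sketch of that correspondence (illustration only, not part of the patch; assumes the x86-64 struct pt_regs from asm/ptrace.h):

	/* SS is the last pt_regs field, so SS+8 == sizeof(struct pt_regs).
	 * MCOUNT_SAVE_FRAME's "subq $(SS+8-\skip)" reserves exactly that,
	 * minus whatever the caller already pushed (the \skip bytes). */
	pr_info("ax@%zu ip@%zu ss@%zu total=%zu\n",
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, ip),
		offsetof(struct pt_regs, ss),
		sizeof(struct pt_regs));
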
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 547882539157..d3ddd17405d0 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
 #include <asm/insn.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
+#define  ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 623f28837476..061ac17ee974 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller)
 	pushl %eax
 	pushl %ecx
 	pushl %edx
-	movl 0xc(%esp), %eax
+	pushl $0	/* Pass NULL as regs pointer */
+	movl 4*4(%esp), %eax
 	movl 0x4(%ebp), %edx
+	leal function_trace_op, %ecx
 	subl $MCOUNT_INSN_SIZE, %eax
 
 .globl ftrace_call
 ftrace_call:
 	call ftrace_stub
 
+	addl $4,%esp	/* skip NULL pointer */
 	popl %edx
 	popl %ecx
 	popl %eax
+ftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -1131,6 +1135,72 @@ ftrace_stub:
 	ret
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+	pushf	/* push flags before compare (in cs location) */
+	cmpl $0, function_trace_stop
+	jne ftrace_restore_flags
+
+	/*
+	 * i386 does not save SS and ESP when coming from kernel.
+	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
+	 * Unfortunately, that means eflags must be at the same location
+	 * as the current return ip is. We move the return ip into the
+	 * ip location, and move flags into the return ip location.
+	 */
+	pushl 4(%esp)	/* save return ip into ip slot */
+	subl $MCOUNT_INSN_SIZE, (%esp)	/* Adjust ip */
+
+	pushl $0	/* Load 0 into orig_ax */
+	pushl %gs
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
+
+	movl 13*4(%esp), %eax	/* Get the saved flags */
+	movl %eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
+				/* clobbering return ip */
+	movl $__KERNEL_CS,13*4(%esp)
+
+	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
+	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
+	leal function_trace_op, %ecx	/* Save ftrace_ops in 3rd parameter */
+	pushl %esp		/* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	addl $4, %esp		/* Skip pt_regs */
+	movl 14*4(%esp), %eax	/* Move flags back into cs */
+	movl %eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
+	movl 12*4(%esp), %eax	/* Get return ip from regs->ip */
+	addl $MCOUNT_INSN_SIZE, %eax
+	movl %eax, 14*4(%esp)	/* Put return ip back for ret */
+
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
+	popl %ds
+	popl %es
+	popl %fs
+	popl %gs
+	addl $8, %esp		/* Skip orig_ax and ip */
+	popf			/* Pop flags at end (no addl to corrupt flags) */
+	jmp ftrace_ret
+
+ftrace_restore_flags:
+	popf
+	jmp ftrace_stub
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
@@ -1171,9 +1241,6 @@ END(mcount)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
 	pushl %eax
 	pushl %ecx
 	pushl %edx
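
Note: for orientation, the frame ftrace_regs_caller fabricates on i386 lines up with struct pt_regs as seen through the pushed %esp (the 4th argument). A hedged sketch of the layout, assuming the struct pt_regs ordering in arch/x86/include/asm/ptrace.h:

	/*
	 * esp+0x00: bx cx dx si di bp ax ds es fs gs    <- the pushl run
	 * esp+0x2c: orig_ax = 0                          (11*4)
	 * esp+0x30: ip    = call site - MCOUNT_INSN_SIZE (12*4)
	 * esp+0x34: cs    = __KERNEL_CS                  (13*4)
	 * esp+0x38: flags = EFLAGS saved by the pushf    (14*4)
	 *
	 * &regs->sp then points at the caller's stack, which is what
	 * ptrace.h expects for a kernel-mode pt_regs on i386.
	 */
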
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 111f6bbd8b38..459d4a0dca8d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -73,20 +73,34 @@ ENTRY(mcount)
 	retq
 END(mcount)
 
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+	MCOUNT_SAVE_FRAME \skip
+
+	/* Load the ftrace_ops into the 3rd parameter */
+	leaq function_trace_op, %rdx
+
+	/* Load ip into the first parameter */
+	movq RIP(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+	/* Load the parent_ip into the second parameter */
+	movq 8(%rbp), %rsi
+.endm
+
 ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
 	cmpl $0, function_trace_stop
 	jne  ftrace_stub
 
-	MCOUNT_SAVE_FRAME
-
-	movq 0x38(%rsp), %rdi
-	movq 8(%rbp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rdi
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx
 
 GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
+ftrace_return:
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 GLOBAL(ftrace_graph_call)
@@ -97,6 +111,71 @@ GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location) */
+	pushfq
+
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne  ftrace_restore_flags
+
+	/* skip=8 to skip flags saved in SS */
+	ftrace_caller_setup 8
+
+	/* Save the rest of pt_regs */
+	movq %r15, R15(%rsp)
+	movq %r14, R14(%rsp)
+	movq %r13, R13(%rsp)
+	movq %r12, R12(%rsp)
+	movq %r11, R11(%rsp)
+	movq %r10, R10(%rsp)
+	movq %rbp, RBP(%rsp)
+	movq %rbx, RBX(%rsp)
+	/* Copy saved flags */
+	movq SS(%rsp), %rcx
+	movq %rcx, EFLAGS(%rsp)
+	/* Kernel segments */
+	movq $__KERNEL_DS, %rcx
+	movq %rcx, SS(%rsp)
+	movq $__KERNEL_CS, %rcx
+	movq %rcx, CS(%rsp)
+	/* Stack - skipping return address */
+	leaq SS+16(%rsp), %rcx
+	movq %rcx, RSP(%rsp)
+
+	/* regs go into 4th parameter */
+	leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	/* Copy flags back to SS, to restore them */
+	movq EFLAGS(%rsp), %rax
+	movq %rax, SS(%rsp)
+
+	/* restore the rest of pt_regs */
+	movq R15(%rsp), %r15
+	movq R14(%rsp), %r14
+	movq R13(%rsp), %r13
+	movq R12(%rsp), %r12
+	movq R10(%rsp), %r10
+	movq RBP(%rsp), %rbp
+	movq RBX(%rsp), %rbx
+
+	/* skip=8 to skip flags saved in SS */
+	MCOUNT_RESTORE_FRAME 8
+
+	/* Restore flags */
+	popfq
+
+	jmp ftrace_return
+ftrace_restore_flags:
+	popfq
+	jmp  ftrace_stub
+
+END(ftrace_regs_caller)
+
+
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
 	cmpl $0, function_trace_stop
@@ -119,7 +198,7 @@ GLOBAL(ftrace_stub)
 trace:
 	MCOUNT_SAVE_FRAME
 
-	movq 0x38(%rsp), %rdi
+	movq RIP(%rsp), %rdi
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
@@ -134,13 +213,10 @@ END(mcount)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
 	MCOUNT_SAVE_FRAME
 
 	leaq 8(%rbp), %rdi
-	movq 0x38(%rsp), %rsi
+	movq RIP(%rsp), %rsi
 	movq (%rbp), %rdx
 	subq $MCOUNT_INSN_SIZE, %rsi
 
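
Note: since ftrace_regs_caller hands the callback a complete pt_regs and reloads ip and flags from it on the way out, a registered callback can both inspect and (carefully) modify the saved state — which is exactly what kprobe_ftrace_handler below relies on. A hypothetical callback, with all names invented for illustration:

	static void sample_regs_probe(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* first integer argument of the traced function (x86-64 ABI) */
		trace_printk("%pS(arg0=%lx) from %pS\n",
			     (void *)ip, regs->di, (void *)parent_ip);
	}

	static struct ftrace_ops sample_ops = {
		.func	= sample_regs_probe,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};
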
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index c3a7cb4bf6e6..1d414029f1d8 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -206,6 +206,21 @@ static int
 ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
 		   unsigned const char *new_code);
 
+/*
+ * Should never be called:
+ *  As it is only called by __ftrace_replace_code() which is called by
+ *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
+ *  which is called to turn mcount into nops or nops into function calls
+ *  but not to convert a function from not using regs to one that uses
+ *  regs, which ftrace_modify_call() is for.
+ */
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	WARN_ON(1);
+	return -EINVAL;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 	ret = ftrace_modify_code(ip, old, new);
 
+	/* Also update the regs callback function */
+	if (!ret) {
+		ip = (unsigned long)(&ftrace_regs_call);
+		memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
+		new = ftrace_call_replace(ip, (unsigned long)func);
+		ret = ftrace_modify_code(ip, old, new);
+	}
+
 	atomic_dec(&modifying_ftrace_code);
 
 	return ret;
@@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
 	return add_break(rec->ip, old);
 }
 
+/*
+ * If the record has the FTRACE_FL_REGS set, that means that it
+ * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
+ * is not set, then it wants to convert to the normal callback.
+ */
+static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
+{
+	if (rec->flags & FTRACE_FL_REGS)
+		return (unsigned long)FTRACE_REGS_ADDR;
+	else
+		return (unsigned long)FTRACE_ADDR;
+}
+
+/*
+ * The FTRACE_FL_REGS_EN is set when the record already points to
+ * a function that saves all the regs. Basically the '_EN' version
+ * represents the current state of the function.
+ */
+static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
+{
+	if (rec->flags & FTRACE_FL_REGS_EN)
+		return (unsigned long)FTRACE_REGS_ADDR;
+	else
+		return (unsigned long)FTRACE_ADDR;
+}
+
 static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ftrace_addr;
@@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_test_record(rec, enable);
 
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
@@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 		/* converting nop to call */
 		return add_brk_on_nop(rec);
 
+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
+		ftrace_addr = get_ftrace_old_addr(rec);
+		/* fall through */
 	case FTRACE_UPDATE_MAKE_NOP:
 		/* converting a call to a nop */
 		return add_brk_on_call(rec, ftrace_addr);
@@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 	 * If not, don't touch the breakpoint, we make just create
 	 * a disaster.
 	 */
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);
+	nop = ftrace_call_replace(ip, ftrace_addr);
+
+	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
+		goto update;
+
+	/* Check both ftrace_addr and ftrace_old_addr */
+	ftrace_addr = get_ftrace_old_addr(rec);
 	nop = ftrace_call_replace(ip, ftrace_addr);
 
 	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 		return -EINVAL;
 	}
 
+ update:
 	return probe_kernel_write((void *)ip, &nop[0], 1);
 }
 
@@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_test_record(rec, enable);
 
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
 		return add_update_call(rec, ftrace_addr);
@@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_update_record(rec, enable);
 
-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
 		return finish_update_call(rec, ftrace_addr);
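
Note: taken together, add_breakpoints(), add_update() and finish_update() are the three-phase, breakpoint-guarded text edit this file already used for nop<->call conversion; the new MODIFY_CALL cases reuse it to retarget a live call from ftrace_caller to ftrace_regs_caller (or back) without ever passing through a nop. A loose paraphrase of the driver sequence, with the per-record iteration and cross-core synchronization elided:

	/* Sketch only; the real ftrace_replace_code() walks every record
	 * and synchronizes all CPUs between the three phases. */
	add_breakpoints(rec, enable);	/* 1: int3 over the first byte     */
	add_update(rec, enable);	/* 2: rewrite the 4-byte target    */
	finish_update(rec, enable);	/* 3: restore the first byte of the
					 *    new call (or of the nop)     */
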
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index e2f751efb7b1..47ae1023a93c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
+#ifdef KPROBES_CAN_USE_FTRACE
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				     struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		regs->ip += sizeof(kprobe_opcode_t);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (p->pre_handler)
+			p->pre_handler(p, regs);
+
+		if (unlikely(p->post_handler)) {
+			/* Emulate singlestep as if there is a 5byte nop */
+			regs->ip = ip + MCOUNT_INSN_SIZE;
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+		__this_cpu_write(current_kprobe, NULL);
+		regs->ip = ip;	/* Recover for next callback */
+	}
+end:
+	local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
+#endif
+
 int __init arch_init_kprobes(void)
 {
 	return arch_init_optprobes();
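
Note: from a probe handler's point of view the ftrace entry path is transparent — kprobe_ftrace_handler() advances regs->ip by sizeof(kprobe_opcode_t) before calling pre_handler, mimicking what an int3 trap would have left behind. An illustrative handler (hypothetical, not part of the patch):

	static int sample_pre(struct kprobe *kp, struct pt_regs *regs)
	{
		/* regs->ip == (unsigned long)kp->addr + sizeof(kprobe_opcode_t)
		 * when entered via the ftrace path */
		pr_info("probe hit at %pS, ip=%lx\n", kp->addr, regs->ip);
		return 0;
	}
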
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 55e6d63d46d0..a52f2f4fe030 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -10,6 +10,7 @@
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/ptrace.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -18,6 +19,28 @@
 
 #include <asm/ftrace.h>
 
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that
+ * does. Or at least does enough to prevent any unwelcome side effects.
+ */
+#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
+	!ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
+
+
 struct module;
 struct ftrace_hash;
 
@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos);
 
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs);
 
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
  * could be controled by following calls:
  *			ftrace_function_local_enable
  *			ftrace_function_local_disable
+ * SAVE_REGS - The ftrace_ops wants regs saved at each function called
+ *            and passed to the callback. If this flag is set, but the
+ *            architecture does not support passing regs
+ *            (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
+ *            ftrace_ops will fail to register, unless the next flag
+ *            is set.
+ * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
+ *            handler can handle an arch that does not save regs
+ *            (the handler tests if regs == NULL), then it can set
+ *            this flag instead. It will not fail registering the ftrace_ops
+ *            but, the regs field will be NULL if the arch does not support
+ *            passing regs to the handler.
+ *            Note, if this flag is set, the SAVE_REGS flag will automatically
+ *            get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback has its own recursion protection. If it does
+ *            not set this, then the ftrace infrastructure will add recursion
+ *            protection for the caller.
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
 	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
 	FTRACE_OPS_FL_CONTROL			= 1 << 3,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 };
 
 struct ftrace_ops {
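
Note: a hedged example of the new flags from a caller's side; the names below are invented, but the callback signature and flags are the ones introduced above:

	static void my_tracer(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
	{
		if (regs)	/* NULL when the arch cannot save regs */
			trace_printk("%pS sp=%lx\n", (void *)ip,
				     kernel_stack_pointer(regs));
		else
			trace_printk("%pS (no regs)\n", (void *)ip);
	}

	static struct ftrace_ops my_tracer_ops = {
		.func	= my_tracer,
		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};

With plain SAVE_REGS instead, register_ftrace_function(&my_tracer_ops) would fail on an arch that does not define ARCH_SUPPORTS_FTRACE_SAVE_REGS; with the _IF_SUPPORTED variant it registers and regs is simply NULL.
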
@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 	return *this_cpu_ptr(ops->disabled);
 }
 
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+			struct ftrace_ops *op, struct pt_regs *regs);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 /*
@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
  */
 #define register_ftrace_function(ops) ({ 0; })
 #define unregister_ftrace_function(ops) ({ 0; })
+static inline int ftrace_nr_registered_ops(void)
+{
+	return 0;
+}
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob);
 
 extern int ftrace_text_reserved(void *start, void *end);
 
+extern int ftrace_nr_registered_ops(void);
+
+/*
+ * The dyn_ftrace record's flags field is split into two parts.
+ * The first part which is '0-FTRACE_REF_MAX' is a counter of
+ * the number of callbacks that have registered the function that
+ * the dyn_ftrace descriptor represents.
+ *
+ * The second part is a mask:
+ *  ENABLED - the function is being traced
+ *  REGS    - the record wants the function to save regs
+ *  REGS_EN - the function is set up to save regs.
+ *
+ * When a new ftrace_ops is registered and wants a function to save
+ * pt_regs, the rec->flags REGS bit is set. When the function has been
+ * set up to save regs, the REGS_EN flag is set. Once a function
+ * starts saving regs it will do so until all ftrace_ops are removed
+ * from tracing that function.
+ */
 enum {
-	FTRACE_FL_ENABLED	= (1 << 30),
+	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_REGS		= (1UL << 30),
+	FTRACE_FL_REGS_EN	= (1UL << 31)
 };
 
-#define FTRACE_FL_MASK		(0x3UL << 30)
-#define FTRACE_REF_MAX		((1 << 30) - 1)
+#define FTRACE_FL_MASK		(0x7UL << 29)
+#define FTRACE_REF_MAX		((1UL << 29) - 1)
 
 struct dyn_ftrace {
 	union {
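
Note: in other words, the low 29 bits count registered callbacks and the top three bits carry state. A small sketch of how the split flags field decomposes (illustrative only; dyn_ftrace records are normally touched only by ftrace itself):

	unsigned long ref = rec->flags & FTRACE_REF_MAX;	/* callback count */
	bool enabled    = rec->flags & FTRACE_FL_ENABLED;	/* being traced   */
	bool wants_regs = rec->flags & FTRACE_FL_REGS;		/* regs requested */
	bool saves_regs = rec->flags & FTRACE_FL_REGS_EN;	/* regs wired up  */

	/* wants_regs && !saves_regs is what ftrace_update_record() reports
	 * as FTRACE_UPDATE_MODIFY_CALL_REGS (see the enum further down). */
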
@@ -244,6 +317,8 @@ struct dyn_ftrace {
 };
 
 int ftrace_force_update(void);
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+			 int remove, int reset);
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		      int len, int reset);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -263,9 +338,23 @@ enum {
 	FTRACE_STOP_FUNC_RET		= (1 << 4),
 };
 
+/*
+ * The FTRACE_UPDATE_* enum is used to pass information back
+ * from the ftrace_update_record() and ftrace_test_record()
+ * functions. These are called by the code update routines
+ * to find out what is to be done for a given function.
+ *
+ *  IGNORE           - The function is already what we want it to be
+ *  MAKE_CALL        - Start tracing the function
+ *  MODIFY_CALL      - Stop saving regs for the function
+ *  MODIFY_CALL_REGS - Start saving regs for the function
+ *  MAKE_NOP         - Stop tracing the function
+ */
 enum {
 	FTRACE_UPDATE_IGNORE,
 	FTRACE_UPDATE_MAKE_CALL,
+	FTRACE_UPDATE_MODIFY_CALL,
+	FTRACE_UPDATE_MODIFY_CALL_REGS,
 	FTRACE_UPDATE_MAKE_NOP,
 };
 
@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data);
 extern void ftrace_replace_code(int enable);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
 extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
 extern void mcount_call(void);
 
 void ftrace_modify_all_code(int command);
@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command);
 #ifndef FTRACE_ADDR
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
+
+#ifndef FTRACE_REGS_ADDR
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
+#else
+# define FTRACE_REGS_ADDR FTRACE_ADDR
+#endif
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
+#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+/**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+ * @rec: the mcount call site record
+ * @old_addr: the address expected to be currently called to
+ * @addr: the address to change to
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @old_addr
+ *
+ * Return must be:
+ *  0 on success
+ *  -EFAULT on error reading the location
+ *  -EINVAL on a failed compare of the contents
+ *  -EPERM  on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+			      unsigned long addr);
+#else
+/* Should never be called */
+static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+				     unsigned long addr)
+{
+	return -EINVAL;
+}
+#endif
+
 /* May be defined in arch */
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
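
Note: a hedged skeleton of an arch implementation honoring this contract (make_call_insn() is a hypothetical instruction encoder; the x86 port in this very series instead funnels the conversion through its breakpoint-based update scheme and keeps a WARN_ON stub here):

	int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			       unsigned long addr)
	{
		unsigned char old[MCOUNT_INSN_SIZE], new[MCOUNT_INSN_SIZE];
		unsigned char cur[MCOUNT_INSN_SIZE];

		make_call_insn(old, rec->ip, old_addr);	/* hypothetical */
		make_call_insn(new, rec->ip, addr);

		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
			return -EFAULT;	/* error reading the location */
		if (memcmp(cur, old, MCOUNT_INSN_SIZE))
			return -EINVAL;	/* not currently calling old_addr */
		if (probe_kernel_write((void *)rec->ip, new, MCOUNT_INSN_SIZE))
			return -EPERM;	/* error writing the location */
		return 0;
	}
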
@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
-#else
+#else /* CONFIG_DYNAMIC_FTRACE */
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end)
 {
 	return 0;
 }
+static inline unsigned long ftrace_location(unsigned long ip)
+{
+	return 0;
+}
 
 /*
  * Again users of functions that have ftrace_ops may not
@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_free_filter(ops) do { } while (0)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b6e1f8c00577..23755ba42abc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -38,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/ftrace.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -48,14 +49,26 @@
 #define KPROBE_REENTER		0x00000004
 #define KPROBE_HIT_SSDONE	0x00000008
 
+/*
+ * If function tracer is enabled and the arch supports full
+ * passing of pt_regs to function tracing, then kprobes can
+ * optimize on top of function tracing.
+ */
+#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
+	&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
+# define KPROBES_CAN_USE_FTRACE
+#endif
+
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes	__attribute__((__section__(".kprobes.text")))
+
 #else /* CONFIG_KPROBES */
 typedef int kprobe_opcode_t;
 struct arch_specific_insn {
 	int dummy;
 };
 #define __kprobes
+
 #endif /* CONFIG_KPROBES */
 
 struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
  * NOTE:
  * this flag is only for optimized_kprobe.
  */
+#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_OPTIMIZED;
 }
+
+/* Does this kprobe use ftrace? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_FTRACE;
+}
+
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *ops, struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c62b8546cc90..35b4315d84f5 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
 
+	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);
 
 	/*
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 4: Free cleaned kprobes after quiesence period */
 	do_free_cleaned_kprobes(&free_list);
 
-	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	mutex_lock(&text_mutex);
+
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		return;
+		goto out;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		return;
+		goto out;
 	}
 
 	init_aggr_kprobe(ap, p);
-	optimize_kprobe(ap);
+	optimize_kprobe(ap);	/* This just kicks optimizer thread */
+
+out:
+	mutex_unlock(&text_mutex);
+	jump_label_unlock();
 }
 
 #ifdef CONFIG_SYSCTL
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+	.func = kprobe_ftrace_handler,
+	.flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		return arch_prepare_kprobe(p);
+
+	return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 0, 0);
+	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	kprobe_ftrace_enabled++;
+	if (kprobe_ftrace_enabled == 1) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	kprobe_ftrace_enabled--;
+	if (kprobe_ftrace_enabled == 0) {
+		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
+	}
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 1, 0);
+	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else	/* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p)	do {} while (0)
+#define disarm_kprobe_ftrace(p)	do {} while (0)
+#endif
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
 
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;
 
-	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
-		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
-			/* Arm the breakpoint again. */
-			__arm_kprobe(ap);
-	}
 	return 0;
 }
 
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	/*
+	 * Get online CPUs to avoid text_mutex deadlock with stop machine,
+	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap)
-			return -ENOMEM;
+		if (!ap) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap))
 		/* This probe is going to die. Rescue it */
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 		 * free aggr_probe. It will be used next time, or
 		 * freed by unregister_kprobe.
 		 */
-		return ret;
+		goto out;
 
 	/* Prepare optimized instructions if possible. */
 	prepare_optimized_kprobe(ap);
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 
 	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
-	return add_new_kprobe(ap, p);
+	ret = add_new_kprobe(ap, p);
+
+out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	jump_label_unlock();
+
+	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
+	return ret;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -1313,71 +1402,99 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static __kprobes int check_kprobe_address_safe(struct kprobe *p,
+					       struct module **probed_mod)
 {
 	int ret = 0;
-	struct kprobe *old_p;
-	struct module *probed_mod;
-	kprobe_opcode_t *addr;
-
-	addr = kprobe_addr(p);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	p->addr = addr;
+	unsigned long ftrace_addr;
 
-	ret = check_kprobe_rereg(p);
-	if (ret)
-		return ret;
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
+	 */
+	ftrace_addr = ftrace_location((unsigned long)p->addr);
+	if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+		/* Given address is not on the instruction boundary */
+		if ((unsigned long)p->addr != ftrace_addr)
+			return -EILSEQ;
+		/* break_handler (jprobe) can not work with ftrace */
+		if (p->break_handler)
+			return -EINVAL;
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !KPROBES_CAN_USE_FTRACE */
+		return -EINVAL;
+#endif
+	}
 
 	jump_label_lock();
 	preempt_disable();
+
+	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
-		goto cannot_probe;
+		goto out;
 	}
 
-	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
-	p->flags &= KPROBE_FLAG_DISABLED;
-
-	/*
-	 * Check if are we probing a module.
-	 */
-	probed_mod = __module_text_address((unsigned long) p->addr);
-	if (probed_mod) {
-		/* Return -ENOENT if fail. */
-		ret = -ENOENT;
+	/* Check if we are probing a module */
+	*probed_mod = __module_text_address((unsigned long) p->addr);
+	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(probed_mod)))
-			goto cannot_probe;
+		if (unlikely(!try_module_get(*probed_mod))) {
+			ret = -ENOENT;
+			goto out;
+		}
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
 		 * kprobes in there.
 		 */
-		if (within_module_init((unsigned long)p->addr, probed_mod) &&
-		    probed_mod->state != MODULE_STATE_COMING) {
-			module_put(probed_mod);
-			goto cannot_probe;
+		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
+		    (*probed_mod)->state != MODULE_STATE_COMING) {
+			module_put(*probed_mod);
+			*probed_mod = NULL;
+			ret = -ENOENT;
 		}
-		/* ret will be updated by following code */
 	}
+out:
 	preempt_enable();
 	jump_label_unlock();
 
+	return ret;
+}
+
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	int ret;
+	struct kprobe *old_p;
+	struct module *probed_mod;
+	kprobe_opcode_t *addr;
+
+	/* Adjust probe address from symbol */
+	addr = kprobe_addr(p);
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+	p->addr = addr;
+
+	ret = check_kprobe_rereg(p);
+	if (ret)
+		return ret;
+
+	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+	p->flags &= KPROBE_FLAG_DISABLED;
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
-	mutex_lock(&kprobe_mutex);
 
-	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+	ret = check_kprobe_address_safe(p, &probed_mod);
+	if (ret)
+		return ret;
 
-	get_online_cpus();	/* For avoiding text_mutex deadlock. */
-	mutex_lock(&text_mutex);
+	mutex_lock(&kprobe_mutex);
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -1386,7 +1503,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
-	ret = arch_prepare_kprobe(p);
+	mutex_lock(&text_mutex);	/* Avoiding text modification */
+	ret = prepare_kprobe(p);
+	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
 
@@ -1395,26 +1514,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		__arm_kprobe(p);
+		arm_kprobe(p);
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 
 out:
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
 		module_put(probed_mod);
 
 	return ret;
-
-cannot_probe:
-	preempt_enable();
-	jump_label_unlock();
-	return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
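
Note: end to end, nothing changes for API users — register_kprobe() silently takes the ftrace path when the address sits exactly on an ftrace location (and break_handler is unset), and otherwise falls back to the classic int3 probe. A hypothetical module-style usage:

	static int sample_pre(struct kprobe *kp, struct pt_regs *regs)
	{
		pr_info("%s hit, ip=%lx\n", kp->symbol_name, regs->ip);
		return 0;	/* let execution continue */
	}

	static struct kprobe sample_kp = {
		.symbol_name	= "do_fork",	/* any probe-able symbol */
		.pre_handler	= sample_pre,
	};

	/* register_kprobe(&sample_kp) arms it; /sys/kernel/debug/kprobes/list
	 * shows [FTRACE] when the probe rode an ftrace site (see the
	 * report_probe() change below). */
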
@@ -1451,7 +1562,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 
 	/* Try to disarm and disable this/parent probe */
 	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-		disarm_kprobe(orig_p);
+		disarm_kprobe(orig_p, true);
 		orig_p->flags |= KPROBE_FLAG_DISABLED;
 	}
 }
@@ -2049,10 +2160,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		   (kprobe_gone(p) ? "[GONE]" : ""),
 		   ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
-		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		   (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2131,14 +2243,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;
 
 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 
 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2166,15 +2276,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b831087c8200..837090808aac 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
+ifdef CONFIG_FTRACE_SELFTEST
 # selftest needs instrumentation
 CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
+endif
 
 # If unlikely tracing is enabled, do not trace these files
 ifdef CONFIG_TRACING_BRANCHES
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4f20fba09fc..9dcf15d38380 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,12 +64,20 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func		= ftrace_stub,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly;
86 94
87static DEFINE_MUTEX(ftrace_lock); 95static DEFINE_MUTEX(ftrace_lock);
88 96
89static struct ftrace_ops ftrace_list_end __read_mostly = {
90 .func = ftrace_stub,
91};
92
93static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; 97static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
94static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 98static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
95static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 99static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
96ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 100ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
97static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
98ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
99ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 101ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
100static struct ftrace_ops global_ops; 102static struct ftrace_ops global_ops;
101static struct ftrace_ops control_ops; 103static struct ftrace_ops control_ops;
102 104
103static void 105#if ARCH_SUPPORTS_FTRACE_OPS
104ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); 106static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
107 struct ftrace_ops *op, struct pt_regs *regs);
108#else
109/* See comment below, where ftrace_ops_list_func is defined */
110static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
111#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
112#endif
113
114/**
115 * ftrace_nr_registered_ops - return number of ops registered
116 *
117 * Returns the number of ftrace_ops registered and tracing functions
118 */
119int ftrace_nr_registered_ops(void)
120{
121 struct ftrace_ops *ops;
122 int cnt = 0;
123
124 mutex_lock(&ftrace_lock);
125
126 for (ops = ftrace_ops_list;
127 ops != &ftrace_list_end; ops = ops->next)
128 cnt++;
129
130 mutex_unlock(&ftrace_lock);
131
132 return cnt;
133}
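With the widened ftrace_func_t, every callback now receives its own ftrace_ops and an optional pt_regs. A minimal sketch of a client under the new signature; my_callback and my_ops are illustrative names, not part of this patch:

	#include <linux/ftrace.h>
	#include <linux/ptrace.h>

	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct pt_regs *regs)
	{
		/*
		 * op lets one function serve several ftrace_ops instances;
		 * regs may be NULL unless FTRACE_OPS_FL_SAVE_REGS is in effect.
		 */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

register_ftrace_function(&my_ops) starts tracing and unregister_ftrace_function(&my_ops) stops it, exactly as before this series.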
105 134
106/* 135/*
107 * Traverse the ftrace_global_list, invoking all entries. The reason that we 136 * Traverse the ftrace_global_list, invoking all entries. The reason that we
@@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
112 * 141 *
113 * Silly Alpha and silly pointer-speculation compiler optimizations! 142 * Silly Alpha and silly pointer-speculation compiler optimizations!
114 */ 143 */
115static void ftrace_global_list_func(unsigned long ip, 144static void
116 unsigned long parent_ip) 145ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
146 struct ftrace_ops *op, struct pt_regs *regs)
117{ 147{
118 struct ftrace_ops *op;
119
120 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) 148 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
121 return; 149 return;
122 150
123 trace_recursion_set(TRACE_GLOBAL_BIT); 151 trace_recursion_set(TRACE_GLOBAL_BIT);
124 op = rcu_dereference_raw(ftrace_global_list); /*see above*/ 152 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
125 while (op != &ftrace_list_end) { 153 while (op != &ftrace_list_end) {
126 op->func(ip, parent_ip); 154 op->func(ip, parent_ip, op, regs);
127 op = rcu_dereference_raw(op->next); /*see above*/ 155 op = rcu_dereference_raw(op->next); /*see above*/
128 }; 156 };
129 trace_recursion_clear(TRACE_GLOBAL_BIT); 157 trace_recursion_clear(TRACE_GLOBAL_BIT);
130} 158}
131 159
132static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) 160static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
161 struct ftrace_ops *op, struct pt_regs *regs)
133{ 162{
134 if (!test_tsk_trace_trace(current)) 163 if (!test_tsk_trace_trace(current))
135 return; 164 return;
136 165
137 ftrace_pid_function(ip, parent_ip); 166 ftrace_pid_function(ip, parent_ip, op, regs);
138} 167}
139 168
140static void set_ftrace_pid_function(ftrace_func_t func) 169static void set_ftrace_pid_function(ftrace_func_t func)
@@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
153void clear_ftrace_function(void) 182void clear_ftrace_function(void)
154{ 183{
155 ftrace_trace_function = ftrace_stub; 184 ftrace_trace_function = ftrace_stub;
156 __ftrace_trace_function = ftrace_stub;
157 __ftrace_trace_function_delay = ftrace_stub;
158 ftrace_pid_function = ftrace_stub; 185 ftrace_pid_function = ftrace_stub;
159} 186}
160 187
161#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
162/*
163 * For those archs that do not test ftrace_trace_stop in their
164 * mcount call site, we need to do it from C.
165 */
166static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
167{
168 if (function_trace_stop)
169 return;
170
171 __ftrace_trace_function(ip, parent_ip);
172}
173#endif
174
175static void control_ops_disable_all(struct ftrace_ops *ops) 188static void control_ops_disable_all(struct ftrace_ops *ops)
176{ 189{
177 int cpu; 190 int cpu;
@@ -230,28 +243,27 @@ static void update_ftrace_function(void)
230 243
231 /* 244 /*
232 * If we are at the end of the list and this ops is 245 * If we are at the end of the list and this ops is
233 * not dynamic, then have the mcount trampoline call 246 * recursion safe and not dynamic and the arch supports passing ops,
234 * the function directly 247 * then have the mcount trampoline call the function directly.
235 */ 248 */
236 if (ftrace_ops_list == &ftrace_list_end || 249 if (ftrace_ops_list == &ftrace_list_end ||
237 (ftrace_ops_list->next == &ftrace_list_end && 250 (ftrace_ops_list->next == &ftrace_list_end &&
238 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) 251 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
252 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
253 !FTRACE_FORCE_LIST_FUNC)) {
254 /* Set the ftrace_ops that the arch callback uses */
255 if (ftrace_ops_list == &global_ops)
256 function_trace_op = ftrace_global_list;
257 else
258 function_trace_op = ftrace_ops_list;
239 func = ftrace_ops_list->func; 259 func = ftrace_ops_list->func;
240 else 260 } else {
261 /* Just use the default ftrace_ops */
262 function_trace_op = &ftrace_list_end;
241 func = ftrace_ops_list_func; 263 func = ftrace_ops_list_func;
264 }
242 265
243#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
244 ftrace_trace_function = func; 266 ftrace_trace_function = func;
245#else
246#ifdef CONFIG_DYNAMIC_FTRACE
247 /* do not update till all functions have been modified */
248 __ftrace_trace_function_delay = func;
249#else
250 __ftrace_trace_function = func;
251#endif
252 ftrace_trace_function =
253 (func == ftrace_stub) ? func : ftrace_test_stop_func;
254#endif
255} 267}
256 268
257static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) 269static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
@@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
325 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) 337 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
326 return -EINVAL; 338 return -EINVAL;
327 339
340#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
341 /*
 342 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
343 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
344 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
345 */
346 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
347 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
348 return -EINVAL;
349
350 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
351 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
352#endif
353
328 if (!core_kernel_data((unsigned long)ops)) 354 if (!core_kernel_data((unsigned long)ops))
329 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 355 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
330 356
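The SAVE_REGS_IF_SUPPORTED fallback introduced here can be exercised as in the sketch below: the callback degrades gracefully when the arch cannot save registers. Names other than the flags and the callback prototype are illustrative:

	static void notrace my_regs_callback(unsigned long ip, unsigned long parent_ip,
					     struct ftrace_ops *op, struct pt_regs *regs)
	{
		if (!regs)
			return;	/* arch could not supply regs; fall back silently */
		/* inspect the saved registers here */
	}

	static struct ftrace_ops my_regs_ops = {
		.func	= my_regs_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
			  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};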
@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
773} 799}
774 800
775static void 801static void
776function_profile_call(unsigned long ip, unsigned long parent_ip) 802function_profile_call(unsigned long ip, unsigned long parent_ip,
803 struct ftrace_ops *ops, struct pt_regs *regs)
777{ 804{
778 struct ftrace_profile_stat *stat; 805 struct ftrace_profile_stat *stat;
779 struct ftrace_profile *rec; 806 struct ftrace_profile *rec;
@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
803#ifdef CONFIG_FUNCTION_GRAPH_TRACER 830#ifdef CONFIG_FUNCTION_GRAPH_TRACER
804static int profile_graph_entry(struct ftrace_graph_ent *trace) 831static int profile_graph_entry(struct ftrace_graph_ent *trace)
805{ 832{
806 function_profile_call(trace->func, 0); 833 function_profile_call(trace->func, 0, NULL, NULL);
807 return 1; 834 return 1;
808} 835}
809 836
@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void)
863#else 890#else
864static struct ftrace_ops ftrace_profile_ops __read_mostly = { 891static struct ftrace_ops ftrace_profile_ops __read_mostly = {
865 .func = function_profile_call, 892 .func = function_profile_call,
893 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
866}; 894};
867 895
868static int register_ftrace_profiler(void) 896static int register_ftrace_profiler(void)
@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = {
1045 .func = ftrace_stub, 1073 .func = ftrace_stub,
1046 .notrace_hash = EMPTY_HASH, 1074 .notrace_hash = EMPTY_HASH,
1047 .filter_hash = EMPTY_HASH, 1075 .filter_hash = EMPTY_HASH,
1076 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
1048}; 1077};
1049 1078
1050static DEFINE_MUTEX(ftrace_regex_lock); 1079static DEFINE_MUTEX(ftrace_regex_lock);
@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1525 rec->flags++; 1554 rec->flags++;
1526 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) 1555 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1527 return; 1556 return;
1557 /*
1558 * If any ops wants regs saved for this function
1559 * then all ops will get saved regs.
1560 */
1561 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1562 rec->flags |= FTRACE_FL_REGS;
1528 } else { 1563 } else {
1529 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) 1564 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1530 return; 1565 return;
@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1616 if (enable && (rec->flags & ~FTRACE_FL_MASK)) 1651 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1617 flag = FTRACE_FL_ENABLED; 1652 flag = FTRACE_FL_ENABLED;
1618 1653
1654 /*
1655 * If enabling and the REGS flag does not match the REGS_EN, then
1656 * do not ignore this record. Set flags to fail the compare against
1657 * ENABLED.
1658 */
1659 if (flag &&
1660 (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1661 flag |= FTRACE_FL_REGS;
1662
1619 /* If the state of this record hasn't changed, then do nothing */ 1663 /* If the state of this record hasn't changed, then do nothing */
1620 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 1664 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1621 return FTRACE_UPDATE_IGNORE; 1665 return FTRACE_UPDATE_IGNORE;
1622 1666
1623 if (flag) { 1667 if (flag) {
1624 if (update) 1668 /* Save off if rec is being enabled (for return value) */
1669 flag ^= rec->flags & FTRACE_FL_ENABLED;
1670
1671 if (update) {
1625 rec->flags |= FTRACE_FL_ENABLED; 1672 rec->flags |= FTRACE_FL_ENABLED;
1626 return FTRACE_UPDATE_MAKE_CALL; 1673 if (flag & FTRACE_FL_REGS) {
1674 if (rec->flags & FTRACE_FL_REGS)
1675 rec->flags |= FTRACE_FL_REGS_EN;
1676 else
1677 rec->flags &= ~FTRACE_FL_REGS_EN;
1678 }
1679 }
1680
1681 /*
1682 * If this record is being updated from a nop, then
1683 * return UPDATE_MAKE_CALL.
 1684 * Otherwise, if the REGS_EN flag is set, then return
 1685 * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
 1686 * the call site from a non-save-regs call to a save-regs call.
 1687 * Otherwise,
 1688 * return UPDATE_MODIFY_CALL to tell the caller to convert
 1689 * from a save-regs call to a non-save-regs call.
1690 */
1691 if (flag & FTRACE_FL_ENABLED)
1692 return FTRACE_UPDATE_MAKE_CALL;
1693 else if (rec->flags & FTRACE_FL_REGS_EN)
1694 return FTRACE_UPDATE_MODIFY_CALL_REGS;
1695 else
1696 return FTRACE_UPDATE_MODIFY_CALL;
1627 } 1697 }
1628 1698
1629 if (update) 1699 if (update) {
1630 rec->flags &= ~FTRACE_FL_ENABLED; 1700 /* If there's no more users, clear all flags */
1701 if (!(rec->flags & ~FTRACE_FL_MASK))
1702 rec->flags = 0;
1703 else
1704 /* Just disable the record (keep REGS state) */
1705 rec->flags &= ~FTRACE_FL_ENABLED;
1706 }
1631 1707
1632 return FTRACE_UPDATE_MAKE_NOP; 1708 return FTRACE_UPDATE_MAKE_NOP;
1633} 1709}
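The REGS/REGS_EN reconciliation above is easiest to follow by stepping through it. Below is a small userspace toy, a sketch for reasoning only and not kernel code, that mirrors the decision order of ftrace_check_record() for a single record (users stands in for the reference count kept in the real rec->flags):

	#include <stdio.h>

	#define FL_ENABLED	(1u << 0)
	#define FL_REGS		(1u << 1)
	#define FL_REGS_EN	(1u << 2)

	/* Mirrors ftrace_check_record() with update == 1 */
	static const char *check(unsigned int *rec, int enable, int users)
	{
		unsigned int flag = (enable && users) ? FL_ENABLED : 0;

		if (flag && (!(*rec & FL_REGS) != !(*rec & FL_REGS_EN)))
			flag |= FL_REGS;

		if ((*rec & FL_ENABLED) == flag)
			return "IGNORE";

		if (flag) {
			flag ^= *rec & FL_ENABLED;	/* keep only what changed */
			*rec |= FL_ENABLED;
			if (flag & FL_REGS) {
				if (*rec & FL_REGS)
					*rec |= FL_REGS_EN;
				else
					*rec &= ~FL_REGS_EN;
			}
			if (flag & FL_ENABLED)
				return "MAKE_CALL";
			return (*rec & FL_REGS_EN) ? "MODIFY_CALL_REGS"
						   : "MODIFY_CALL";
		}

		if (!users)
			*rec = 0;	/* last user gone: clear all state */
		else
			*rec &= ~FL_ENABLED;
		return "MAKE_NOP";
	}

	int main(void)
	{
		unsigned int rec = FL_REGS;	/* a SAVE_REGS ops claimed the site */

		printf("%s\n", check(&rec, 1, 1)); /* MAKE_CALL (regs trampoline) */
		rec &= ~FL_REGS;		   /* the regs user went away     */
		printf("%s\n", check(&rec, 1, 1)); /* MODIFY_CALL (drop regs)     */
		printf("%s\n", check(&rec, 0, 0)); /* MAKE_NOP                    */
		return 0;
	}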
@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1662static int 1738static int
1663__ftrace_replace_code(struct dyn_ftrace *rec, int enable) 1739__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1664{ 1740{
1741 unsigned long ftrace_old_addr;
1665 unsigned long ftrace_addr; 1742 unsigned long ftrace_addr;
1666 int ret; 1743 int ret;
1667 1744
1668 ftrace_addr = (unsigned long)FTRACE_ADDR;
1669
1670 ret = ftrace_update_record(rec, enable); 1745 ret = ftrace_update_record(rec, enable);
1671 1746
1747 if (rec->flags & FTRACE_FL_REGS)
1748 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1749 else
1750 ftrace_addr = (unsigned long)FTRACE_ADDR;
1751
1672 switch (ret) { 1752 switch (ret) {
1673 case FTRACE_UPDATE_IGNORE: 1753 case FTRACE_UPDATE_IGNORE:
1674 return 0; 1754 return 0;
@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1678 1758
1679 case FTRACE_UPDATE_MAKE_NOP: 1759 case FTRACE_UPDATE_MAKE_NOP:
1680 return ftrace_make_nop(NULL, rec, ftrace_addr); 1760 return ftrace_make_nop(NULL, rec, ftrace_addr);
1761
1762 case FTRACE_UPDATE_MODIFY_CALL_REGS:
1763 case FTRACE_UPDATE_MODIFY_CALL:
1764 if (rec->flags & FTRACE_FL_REGS)
1765 ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1766 else
1767 ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1768
1769 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1681 } 1770 }
1682 1771
1683 return -1; /* unknown ftrace bug */ 1772
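On x86, whose ftrace.c is touched by this series, the two MODIFY_CALL cases boil down to retargeting the call instruction at the mcount site between ftrace_caller and ftrace_regs_caller. Roughly, with helper names as in arch/x86/kernel/ftrace.c and error handling trimmed:

	int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			       unsigned long addr)
	{
		unsigned long ip = rec->ip;
		unsigned const char *new, *old;

		old = ftrace_call_replace(ip, old_addr);	/* "call old_addr" bytes */
		new = ftrace_call_replace(ip, addr);		/* "call addr" bytes     */

		return ftrace_modify_code(ip, old, new);
	}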
@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command)
1882 */ 1971 */
1883 arch_ftrace_update_code(command); 1972 arch_ftrace_update_code(command);
1884 1973
1885#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1886 /*
1887 * For archs that call ftrace_test_stop_func(), we must
1888 * wait till after we update all the function callers
1889 * before we update the callback. This keeps different
1890 * ops that record different functions from corrupting
1891 * each other.
1892 */
1893 __ftrace_trace_function = __ftrace_trace_function_delay;
1894#endif
1895 function_trace_stop--; 1974 function_trace_stop--;
1896 1975
1897 ret = ftrace_arch_code_modify_post_process(); 1976 ret = ftrace_arch_code_modify_post_process();
@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v)
2441 2520
2442 seq_printf(m, "%ps", (void *)rec->ip); 2521 seq_printf(m, "%ps", (void *)rec->ip);
2443 if (iter->flags & FTRACE_ITER_ENABLED) 2522 if (iter->flags & FTRACE_ITER_ENABLED)
2444 seq_printf(m, " (%ld)", 2523 seq_printf(m, " (%ld)%s",
2445 rec->flags & ~FTRACE_FL_MASK); 2524 rec->flags & ~FTRACE_FL_MASK,
2525 rec->flags & FTRACE_FL_REGS ? " R" : "");
2446 seq_printf(m, "\n"); 2526 seq_printf(m, "\n");
2447 2527
2448 return 0; 2528 return 0;
@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void)
2790} 2870}
2791device_initcall(ftrace_mod_cmd_init); 2871device_initcall(ftrace_mod_cmd_init);
2792 2872
2793static void 2873static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2794function_trace_probe_call(unsigned long ip, unsigned long parent_ip) 2874 struct ftrace_ops *op, struct pt_regs *pt_regs)
2795{ 2875{
2796 struct ftrace_func_probe *entry; 2876 struct ftrace_func_probe *entry;
2797 struct hlist_head *hhd; 2877 struct hlist_head *hhd;
@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
3162} 3242}
3163 3243
3164static int 3244static int
3165ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 3245ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3166 int reset, int enable) 3246{
3247 struct ftrace_func_entry *entry;
3248
3249 if (!ftrace_location(ip))
3250 return -EINVAL;
3251
3252 if (remove) {
3253 entry = ftrace_lookup_ip(hash, ip);
3254 if (!entry)
3255 return -ENOENT;
3256 free_hash_entry(hash, entry);
3257 return 0;
3258 }
3259
3260 return add_hash_entry(hash, ip);
3261}
3262
3263static int
3264ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3265 unsigned long ip, int remove, int reset, int enable)
3167{ 3266{
3168 struct ftrace_hash **orig_hash; 3267 struct ftrace_hash **orig_hash;
3169 struct ftrace_hash *hash; 3268 struct ftrace_hash *hash;
@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3192 ret = -EINVAL; 3291 ret = -EINVAL;
3193 goto out_regex_unlock; 3292 goto out_regex_unlock;
3194 } 3293 }
3294 if (ip) {
3295 ret = ftrace_match_addr(hash, ip, remove);
3296 if (ret < 0)
3297 goto out_regex_unlock;
3298 }
3195 3299
3196 mutex_lock(&ftrace_lock); 3300 mutex_lock(&ftrace_lock);
3197 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3301 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3208 return ret; 3312 return ret;
3209} 3313}
3210 3314
3315static int
3316ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3317 int reset, int enable)
3318{
3319 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3320}
3321
3322/**
3323 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3324 * @ops - the ops to set the filter with
3325 * @ip - the address to add to or remove from the filter.
3326 * @remove - non-zero to remove the ip from the filter
3327 * @reset - non-zero to reset all filters before applying this filter.
3328 *
3329 * Filters denote which functions should be enabled when tracing is enabled.
3330 * If @ip is NULL, it fails to update the filter.
3331 */
3332int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3333 int remove, int reset)
3334{
3335 return ftrace_set_addr(ops, ip, remove, reset, 1);
3336}
3337EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3338
3339static int
3340ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3341 int reset, int enable)
3342{
3343 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3344}
3345
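ftrace_set_filter_ip() is what lets a client such as kprobes hook one exact address instead of a name glob. A minimal usage sketch; attach_to is an illustrative helper, not part of this patch:

	#include <linux/ftrace.h>

	/* Filter ops down to exactly one address and start tracing it. */
	static int attach_to(struct ftrace_ops *ops, void *func)
	{
		int ret;

		/* remove=0 adds the ip, reset=1 drops any earlier filters */
		ret = ftrace_set_filter_ip(ops, (unsigned long)func, 0, 1);
		if (ret)
			return ret;

		return register_ftrace_function(ops);
	}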
3211/** 3346/**
3212 * ftrace_set_filter - set a function to filter on in ftrace 3347 * ftrace_set_filter - set a function to filter on in ftrace
3213 * @ops - the ops to set the filter with 3348 * @ops - the ops to set the filter with
@@ -3912,6 +4047,7 @@ void __init ftrace_init(void)
3912 4047
3913static struct ftrace_ops global_ops = { 4048static struct ftrace_ops global_ops = {
3914 .func = ftrace_stub, 4049 .func = ftrace_stub,
4050 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3915}; 4051};
3916 4052
3917static int __init ftrace_nodyn_init(void) 4053static int __init ftrace_nodyn_init(void)
@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3942#endif /* CONFIG_DYNAMIC_FTRACE */ 4078#endif /* CONFIG_DYNAMIC_FTRACE */
3943 4079
3944static void 4080static void
3945ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) 4081ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4082 struct ftrace_ops *op, struct pt_regs *regs)
3946{ 4083{
3947 struct ftrace_ops *op;
3948
3949 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) 4084 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
3950 return; 4085 return;
3951 4086
@@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
3959 while (op != &ftrace_list_end) { 4094 while (op != &ftrace_list_end) {
3960 if (!ftrace_function_local_disabled(op) && 4095 if (!ftrace_function_local_disabled(op) &&
3961 ftrace_ops_test(op, ip)) 4096 ftrace_ops_test(op, ip))
3962 op->func(ip, parent_ip); 4097 op->func(ip, parent_ip, op, regs);
3963 4098
3964 op = rcu_dereference_raw(op->next); 4099 op = rcu_dereference_raw(op->next);
3965 }; 4100 };
@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
3969 4104
3970static struct ftrace_ops control_ops = { 4105static struct ftrace_ops control_ops = {
3971 .func = ftrace_ops_control_func, 4106 .func = ftrace_ops_control_func,
4107 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3972}; 4108};
3973 4109
3974static void 4110static inline void
3975ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) 4111__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4112 struct ftrace_ops *ignored, struct pt_regs *regs)
3976{ 4113{
3977 struct ftrace_ops *op; 4114 struct ftrace_ops *op;
3978 4115
4116 if (function_trace_stop)
4117 return;
4118
3979 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) 4119 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3980 return; 4120 return;
3981 4121
@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3988 op = rcu_dereference_raw(ftrace_ops_list); 4128 op = rcu_dereference_raw(ftrace_ops_list);
3989 while (op != &ftrace_list_end) { 4129 while (op != &ftrace_list_end) {
3990 if (ftrace_ops_test(op, ip)) 4130 if (ftrace_ops_test(op, ip))
3991 op->func(ip, parent_ip); 4131 op->func(ip, parent_ip, op, regs);
3992 op = rcu_dereference_raw(op->next); 4132 op = rcu_dereference_raw(op->next);
3993 }; 4133 };
3994 preempt_enable_notrace(); 4134 preempt_enable_notrace();
3995 trace_recursion_clear(TRACE_INTERNAL_BIT); 4135 trace_recursion_clear(TRACE_INTERNAL_BIT);
3996} 4136}
3997 4137
4138/*
4139 * Some archs only support passing ip and parent_ip. Even though
4140 * the list function ignores the op parameter, we do not want any
4141 * C side effects, where a function is called without the caller
4142 * sending a third parameter.
 4143 * Archs should support both regs and ftrace_ops at the same time:
 4144 * if they support ftrace_ops, it is assumed they also support regs.
 4145 * If callbacks want to use regs, they must either check for regs
 4146 * being NULL, or check ARCH_SUPPORTS_FTRACE_SAVE_REGS.
 4147 * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full pt_regs to be saved.
 4148 * An architecture can pass partial regs with ftrace_ops and still
 4149 * set ARCH_SUPPORTS_FTRACE_OPS.
4150 */
4151#if ARCH_SUPPORTS_FTRACE_OPS
4152static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4153 struct ftrace_ops *op, struct pt_regs *regs)
4154{
4155 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4156}
4157#else
4158static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4159{
4160 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4161}
4162#endif
4163
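The #define above stores the two-argument ftrace_ops_no_ops() through the four-argument ftrace_func_t pointer type, so the assignments in update_ftrace_function() type-check while the arch keeps passing only two arguments. A standalone illustration of the cast; note this relies on the kernel's calling convention tolerating unread trailing arguments and is not portable standard C:

	#include <linux/ftrace.h>

	static void two_arg_handler(unsigned long ip, unsigned long parent_ip)
	{
		/* op and regs simply do not exist here */
	}

	/* same shape as the ftrace_ops_list_func #define above */
	static ftrace_func_t fp = (ftrace_func_t)two_arg_handler;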
3998static void clear_ftrace_swapper(void) 4164static void clear_ftrace_swapper(void)
3999{ 4165{
4000 struct task_struct *p; 4166 struct task_struct *p;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 55e1f7f0db12..593debefc4e9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]);
472 472
473#ifdef CONFIG_DYNAMIC_FTRACE 473#ifdef CONFIG_DYNAMIC_FTRACE
474extern unsigned long ftrace_update_tot_cnt; 474extern unsigned long ftrace_update_tot_cnt;
475#endif
475#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func 476#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
476extern int DYN_FTRACE_TEST_NAME(void); 477extern int DYN_FTRACE_TEST_NAME(void);
477#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 478#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
478extern int DYN_FTRACE_TEST_NAME2(void); 479extern int DYN_FTRACE_TEST_NAME2(void);
479#endif
480 480
481extern int ring_buffer_expanded; 481extern int ring_buffer_expanded;
482extern bool tracing_selftest_disabled; 482extern bool tracing_selftest_disabled;
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752ae8f6..9824419c8404 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
258 258
259#ifdef CONFIG_FUNCTION_TRACER 259#ifdef CONFIG_FUNCTION_TRACER
260static void 260static void
261perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) 261perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
262 struct ftrace_ops *ops, struct pt_regs *pt_regs)
262{ 263{
263 struct ftrace_entry *entry; 264 struct ftrace_entry *entry;
264 struct hlist_head *head; 265 struct hlist_head *head;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29111da1d100..6825d833a257 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void)
1681static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 1681static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
1682 1682
1683static void 1683static void
1684function_test_events_call(unsigned long ip, unsigned long parent_ip) 1684function_test_events_call(unsigned long ip, unsigned long parent_ip,
1685 struct ftrace_ops *op, struct pt_regs *pt_regs)
1685{ 1686{
1686 struct ring_buffer_event *event; 1687 struct ring_buffer_event *event;
1687 struct ring_buffer *buffer; 1688 struct ring_buffer *buffer;
@@ -1720,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1720static struct ftrace_ops trace_ops __initdata = 1721static struct ftrace_ops trace_ops __initdata =
1721{ 1722{
1722 .func = function_test_events_call, 1723 .func = function_test_events_call,
1724 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
1723}; 1725};
1724 1726
1725static __init void event_trace_self_test_with_function(void) 1727static __init void event_trace_self_test_with_function(void)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c7b0c6a7db09..fdff65dff1bb 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
48} 48}
49 49
50static void 50static void
51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) 51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
52 struct ftrace_ops *op, struct pt_regs *pt_regs)
52{ 53{
53 struct trace_array *tr = func_trace; 54 struct trace_array *tr = func_trace;
54 struct trace_array_cpu *data; 55 struct trace_array_cpu *data;
@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
75} 76}
76 77
77static void 78static void
78function_trace_call(unsigned long ip, unsigned long parent_ip) 79function_trace_call(unsigned long ip, unsigned long parent_ip,
80 struct ftrace_ops *op, struct pt_regs *pt_regs)
81
79{ 82{
80 struct trace_array *tr = func_trace; 83 struct trace_array *tr = func_trace;
81 struct trace_array_cpu *data; 84 struct trace_array_cpu *data;
@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
106} 109}
107 110
108static void 111static void
109function_stack_trace_call(unsigned long ip, unsigned long parent_ip) 112function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
113 struct ftrace_ops *op, struct pt_regs *pt_regs)
110{ 114{
111 struct trace_array *tr = func_trace; 115 struct trace_array *tr = func_trace;
112 struct trace_array_cpu *data; 116 struct trace_array_cpu *data;
@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
149static struct ftrace_ops trace_ops __read_mostly = 153static struct ftrace_ops trace_ops __read_mostly =
150{ 154{
151 .func = function_trace_call, 155 .func = function_trace_call,
152 .flags = FTRACE_OPS_FL_GLOBAL, 156 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
153}; 157};
154 158
155static struct ftrace_ops trace_stack_ops __read_mostly = 159static struct ftrace_ops trace_stack_ops __read_mostly =
156{ 160{
157 .func = function_stack_trace_call, 161 .func = function_stack_trace_call,
158 .flags = FTRACE_OPS_FL_GLOBAL, 162 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
159}; 163};
160 164
161/* Our two options */ 165/* Our two options */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 99d20e920368..d98ee8283b29 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
136 * irqsoff uses its own tracer function to keep the overhead down: 136 * irqsoff uses its own tracer function to keep the overhead down:
137 */ 137 */
138static void 138static void
139irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) 139irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
140 struct ftrace_ops *op, struct pt_regs *pt_regs)
140{ 141{
141 struct trace_array *tr = irqsoff_trace; 142 struct trace_array *tr = irqsoff_trace;
142 struct trace_array_cpu *data; 143 struct trace_array_cpu *data;
@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
153static struct ftrace_ops trace_ops __read_mostly = 154static struct ftrace_ops trace_ops __read_mostly =
154{ 155{
155 .func = irqsoff_tracer_call, 156 .func = irqsoff_tracer_call,
156 .flags = FTRACE_OPS_FL_GLOBAL, 157 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
157}; 158};
158#endif /* CONFIG_FUNCTION_TRACER */ 159#endif /* CONFIG_FUNCTION_TRACER */
159 160
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ff791ea48b57..02170c00c413 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -108,7 +108,8 @@ out_enable:
108 * wakeup uses its own tracer function to keep the overhead down: 108 * wakeup uses its own tracer function to keep the overhead down:
109 */ 109 */
110static void 110static void
111wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) 111wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
112 struct ftrace_ops *op, struct pt_regs *pt_regs)
112{ 113{
113 struct trace_array *tr = wakeup_trace; 114 struct trace_array *tr = wakeup_trace;
114 struct trace_array_cpu *data; 115 struct trace_array_cpu *data;
@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
129static struct ftrace_ops trace_ops __read_mostly = 130static struct ftrace_ops trace_ops __read_mostly =
130{ 131{
131 .func = wakeup_tracer_call, 132 .func = wakeup_tracer_call,
132 .flags = FTRACE_OPS_FL_GLOBAL, 133 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
133}; 134};
134#endif /* CONFIG_FUNCTION_TRACER */ 135#endif /* CONFIG_FUNCTION_TRACER */
135 136
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 288541f977fb..1003a4d5eb25 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
103 103
104static int trace_selftest_test_probe1_cnt; 104static int trace_selftest_test_probe1_cnt;
105static void trace_selftest_test_probe1_func(unsigned long ip, 105static void trace_selftest_test_probe1_func(unsigned long ip,
106 unsigned long pip) 106 unsigned long pip,
107 struct ftrace_ops *op,
108 struct pt_regs *pt_regs)
107{ 109{
108 trace_selftest_test_probe1_cnt++; 110 trace_selftest_test_probe1_cnt++;
109} 111}
110 112
111static int trace_selftest_test_probe2_cnt; 113static int trace_selftest_test_probe2_cnt;
112static void trace_selftest_test_probe2_func(unsigned long ip, 114static void trace_selftest_test_probe2_func(unsigned long ip,
113 unsigned long pip) 115 unsigned long pip,
116 struct ftrace_ops *op,
117 struct pt_regs *pt_regs)
114{ 118{
115 trace_selftest_test_probe2_cnt++; 119 trace_selftest_test_probe2_cnt++;
116} 120}
117 121
118static int trace_selftest_test_probe3_cnt; 122static int trace_selftest_test_probe3_cnt;
119static void trace_selftest_test_probe3_func(unsigned long ip, 123static void trace_selftest_test_probe3_func(unsigned long ip,
120 unsigned long pip) 124 unsigned long pip,
125 struct ftrace_ops *op,
126 struct pt_regs *pt_regs)
121{ 127{
122 trace_selftest_test_probe3_cnt++; 128 trace_selftest_test_probe3_cnt++;
123} 129}
124 130
125static int trace_selftest_test_global_cnt; 131static int trace_selftest_test_global_cnt;
126static void trace_selftest_test_global_func(unsigned long ip, 132static void trace_selftest_test_global_func(unsigned long ip,
127 unsigned long pip) 133 unsigned long pip,
134 struct ftrace_ops *op,
135 struct pt_regs *pt_regs)
128{ 136{
129 trace_selftest_test_global_cnt++; 137 trace_selftest_test_global_cnt++;
130} 138}
131 139
132static int trace_selftest_test_dyn_cnt; 140static int trace_selftest_test_dyn_cnt;
133static void trace_selftest_test_dyn_func(unsigned long ip, 141static void trace_selftest_test_dyn_func(unsigned long ip,
134 unsigned long pip) 142 unsigned long pip,
143 struct ftrace_ops *op,
144 struct pt_regs *pt_regs)
135{ 145{
136 trace_selftest_test_dyn_cnt++; 146 trace_selftest_test_dyn_cnt++;
137} 147}
138 148
139static struct ftrace_ops test_probe1 = { 149static struct ftrace_ops test_probe1 = {
140 .func = trace_selftest_test_probe1_func, 150 .func = trace_selftest_test_probe1_func,
151 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
141}; 152};
142 153
143static struct ftrace_ops test_probe2 = { 154static struct ftrace_ops test_probe2 = {
144 .func = trace_selftest_test_probe2_func, 155 .func = trace_selftest_test_probe2_func,
156 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
145}; 157};
146 158
147static struct ftrace_ops test_probe3 = { 159static struct ftrace_ops test_probe3 = {
148 .func = trace_selftest_test_probe3_func, 160 .func = trace_selftest_test_probe3_func,
161 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
149}; 162};
150 163
151static struct ftrace_ops test_global = { 164static struct ftrace_ops test_global = {
152 .func = trace_selftest_test_global_func, 165 .func = trace_selftest_test_global_func,
153 .flags = FTRACE_OPS_FL_GLOBAL, 166 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
154}; 167};
155 168
156static void print_counts(void) 169static void print_counts(void)
@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
393 406
394 return ret; 407 return ret;
395} 408}
409
410static int trace_selftest_recursion_cnt;
411static void trace_selftest_test_recursion_func(unsigned long ip,
412 unsigned long pip,
413 struct ftrace_ops *op,
414 struct pt_regs *pt_regs)
415{
416 /*
417 * This function is registered without the recursion safe flag.
418 * The ftrace infrastructure should provide the recursion
419 * protection. If not, this will crash the kernel!
420 */
421 trace_selftest_recursion_cnt++;
422 DYN_FTRACE_TEST_NAME();
423}
424
425static void trace_selftest_test_recursion_safe_func(unsigned long ip,
426 unsigned long pip,
427 struct ftrace_ops *op,
428 struct pt_regs *pt_regs)
429{
430 /*
 431 * We said we would provide our own recursion protection. By calling
 432 * this function again, we should recurse back into this function
 433 * and count again. But this only happens if the arch supports
 434 * all ftrace features and nothing else is using the function
435 * tracing utility.
436 */
437 if (trace_selftest_recursion_cnt++)
438 return;
439 DYN_FTRACE_TEST_NAME();
440}
441
442static struct ftrace_ops test_rec_probe = {
443 .func = trace_selftest_test_recursion_func,
444};
445
446static struct ftrace_ops test_recsafe_probe = {
447 .func = trace_selftest_test_recursion_safe_func,
448 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
449};
450
451static int
452trace_selftest_function_recursion(void)
453{
454 int save_ftrace_enabled = ftrace_enabled;
455 int save_tracer_enabled = tracer_enabled;
456 char *func_name;
457 int len;
458 int ret;
459 int cnt;
460
461 /* The previous test PASSED */
462 pr_cont("PASSED\n");
463 pr_info("Testing ftrace recursion: ");
464
465
466 /* enable tracing, and record the filter function */
467 ftrace_enabled = 1;
468 tracer_enabled = 1;
469
470 /* Handle PPC64 '.' name */
471 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
472 len = strlen(func_name);
473
474 ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
475 if (ret) {
476 pr_cont("*Could not set filter* ");
477 goto out;
478 }
479
480 ret = register_ftrace_function(&test_rec_probe);
481 if (ret) {
482 pr_cont("*could not register callback* ");
483 goto out;
484 }
485
486 DYN_FTRACE_TEST_NAME();
487
488 unregister_ftrace_function(&test_rec_probe);
489
490 ret = -1;
491 if (trace_selftest_recursion_cnt != 1) {
492 pr_cont("*callback not called once (%d)* ",
493 trace_selftest_recursion_cnt);
494 goto out;
495 }
496
497 trace_selftest_recursion_cnt = 1;
498
499 pr_cont("PASSED\n");
500 pr_info("Testing ftrace recursion safe: ");
501
502 ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
503 if (ret) {
504 pr_cont("*Could not set filter* ");
505 goto out;
506 }
507
508 ret = register_ftrace_function(&test_recsafe_probe);
509 if (ret) {
510 pr_cont("*could not register callback* ");
511 goto out;
512 }
513
514 DYN_FTRACE_TEST_NAME();
515
516 unregister_ftrace_function(&test_recsafe_probe);
517
518 /*
 519 * If the arch supports all ftrace features, and no other ops
 520 * was on the list, we should be fine.
521 */
522 if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
523 cnt = 2; /* Should have recursed */
524 else
525 cnt = 1;
526
527 ret = -1;
528 if (trace_selftest_recursion_cnt != cnt) {
529 pr_cont("*callback not called expected %d times (%d)* ",
530 cnt, trace_selftest_recursion_cnt);
531 goto out;
532 }
533
534 ret = 0;
535out:
536 ftrace_enabled = save_ftrace_enabled;
537 tracer_enabled = save_tracer_enabled;
538
539 return ret;
540}
396#else 541#else
397# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) 542# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
543# define trace_selftest_function_recursion() ({ 0; })
398#endif /* CONFIG_DYNAMIC_FTRACE */ 544#endif /* CONFIG_DYNAMIC_FTRACE */
399 545
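Why cnt == 1 proves the protection worked: the unsafe callback re-invokes the traced function, so without the recursion bit it would immediately re-enter itself. A plain userspace toy of the mechanism, illustrative only:

	#include <stdio.h>

	static int protected_by_caller;	/* models ftrace's recursion bit */
	static int cnt;

	static void traced_function(void);

	static void callback(void)
	{
		cnt++;
		traced_function();	/* like calling DYN_FTRACE_TEST_NAME() */
	}

	static void traced_function(void)
	{
		/*
		 * ftrace's list func sets/clears a per-context recursion bit
		 * around callbacks that are not RECURSION_SAFE.
		 */
		if (protected_by_caller)
			return;
		protected_by_caller = 1;
		callback();
		protected_by_caller = 0;
	}

	int main(void)
	{
		traced_function();
		printf("cnt = %d\n", cnt);	/* 1: the nested call was blocked */
		return 0;
	}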
546static enum {
547 TRACE_SELFTEST_REGS_START,
548 TRACE_SELFTEST_REGS_FOUND,
549 TRACE_SELFTEST_REGS_NOT_FOUND,
550} trace_selftest_regs_stat;
551
552static void trace_selftest_test_regs_func(unsigned long ip,
553 unsigned long pip,
554 struct ftrace_ops *op,
555 struct pt_regs *pt_regs)
556{
557 if (pt_regs)
558 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
559 else
560 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
561}
562
563static struct ftrace_ops test_regs_probe = {
564 .func = trace_selftest_test_regs_func,
565 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
566};
567
568static int
569trace_selftest_function_regs(void)
570{
571 int save_ftrace_enabled = ftrace_enabled;
572 int save_tracer_enabled = tracer_enabled;
573 char *func_name;
574 int len;
575 int ret;
576 int supported = 0;
577
578#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
579 supported = 1;
580#endif
581
582 /* The previous test PASSED */
583 pr_cont("PASSED\n");
584 pr_info("Testing ftrace regs%s: ",
585 !supported ? "(no arch support)" : "");
586
587 /* enable tracing, and record the filter function */
588 ftrace_enabled = 1;
589 tracer_enabled = 1;
590
591 /* Handle PPC64 '.' name */
592 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
593 len = strlen(func_name);
594
595 ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
596 /*
597 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
598 * This test really doesn't care.
599 */
600 if (ret && ret != -ENODEV) {
601 pr_cont("*Could not set filter* ");
602 goto out;
603 }
604
605 ret = register_ftrace_function(&test_regs_probe);
606 /*
607 * Now if the arch does not support passing regs, then this should
608 * have failed.
609 */
610 if (!supported) {
611 if (!ret) {
612 pr_cont("*registered save-regs without arch support* ");
613 goto out;
614 }
615 test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
616 ret = register_ftrace_function(&test_regs_probe);
617 }
618 if (ret) {
619 pr_cont("*could not register callback* ");
620 goto out;
621 }
622
623
624 DYN_FTRACE_TEST_NAME();
625
626 unregister_ftrace_function(&test_regs_probe);
627
628 ret = -1;
629
630 switch (trace_selftest_regs_stat) {
631 case TRACE_SELFTEST_REGS_START:
632 pr_cont("*callback never called* ");
633 goto out;
634
635 case TRACE_SELFTEST_REGS_FOUND:
636 if (supported)
637 break;
638 pr_cont("*callback received regs without arch support* ");
639 goto out;
640
641 case TRACE_SELFTEST_REGS_NOT_FOUND:
642 if (!supported)
643 break;
644 pr_cont("*callback received NULL regs* ");
645 goto out;
646 }
647
648 ret = 0;
649out:
650 ftrace_enabled = save_ftrace_enabled;
651 tracer_enabled = save_tracer_enabled;
652
653 return ret;
654}
655
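When the regs selftest reports REGS_FOUND, a callback can use the generic pt_regs accessors. A hedged sketch; printing here is only reasonable because pr_debug() normally compiles away, and the exact saved ip relative to the traced address is arch-defined:

	#include <linux/ftrace.h>
	#include <linux/ptrace.h>

	static void notrace my_regs_probe(unsigned long ip, unsigned long parent_ip,
					  struct ftrace_ops *op, struct pt_regs *regs)
	{
		if (!regs)
			return;
		/* Example: look at the saved instruction pointer. */
		pr_debug("traced %pS, saved ip %lx\n",
			 (void *)ip, instruction_pointer(regs));
	}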
400/* 656/*
401 * Simple verification test of ftrace function tracer. 657 * Simple verification test of ftrace function tracer.
402 * Enable ftrace, sleep 1/10 second, and then read the trace 658 * Enable ftrace, sleep 1/10 second, and then read the trace
@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
442 698
443 ret = trace_selftest_startup_dynamic_tracing(trace, tr, 699 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
444 DYN_FTRACE_TEST_NAME); 700 DYN_FTRACE_TEST_NAME);
701 if (ret)
702 goto out;
703
704 ret = trace_selftest_function_recursion();
705 if (ret)
706 goto out;
445 707
708 ret = trace_selftest_function_regs();
446 out: 709 out:
447 ftrace_enabled = save_ftrace_enabled; 710 ftrace_enabled = save_ftrace_enabled;
448 tracer_enabled = save_tracer_enabled; 711 tracer_enabled = save_tracer_enabled;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d4545f49242e..0c1b165778e5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -111,7 +111,8 @@ static inline void check_stack(void)
111} 111}
112 112
113static void 113static void
114stack_trace_call(unsigned long ip, unsigned long parent_ip) 114stack_trace_call(unsigned long ip, unsigned long parent_ip,
115 struct ftrace_ops *op, struct pt_regs *pt_regs)
115{ 116{
116 int cpu; 117 int cpu;
117 118
@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
136static struct ftrace_ops trace_ops __read_mostly = 137static struct ftrace_ops trace_ops __read_mostly =
137{ 138{
138 .func = stack_trace_call, 139 .func = stack_trace_call,
140 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
139}; 141};
140 142
141static ssize_t 143static ssize_t