diff options
39 files changed, 841 insertions, 429 deletions
diff --git a/Documentation/markers.txt b/Documentation/markers.txt index 089f6138fcd9..6d275e4ef385 100644 --- a/Documentation/markers.txt +++ b/Documentation/markers.txt | |||
@@ -70,6 +70,20 @@ a printk warning which identifies the inconsistency: | |||
70 | 70 | ||
71 | "Format mismatch for probe probe_name (format), marker (format)" | 71 | "Format mismatch for probe probe_name (format), marker (format)" |
72 | 72 | ||
73 | Another way to use markers is to simply define the marker without generating any | ||
74 | function call to actually call into the marker. This is useful in combination | ||
75 | with tracepoint probes in a scheme like this : | ||
76 | |||
77 | void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk); | ||
78 | |||
79 | DEFINE_MARKER_TP(marker_eventname, tracepoint_name, probe_tracepoint_name, | ||
80 | "arg1 %u pid %d"); | ||
81 | |||
82 | notrace void probe_tracepoint_name(unsigned int arg1, struct task_struct *tsk) | ||
83 | { | ||
84 | struct marker *marker = &GET_MARKER(marker_eventname); | ||
85 | /* write data to trace buffers ... */ | ||
86 | } | ||
73 | 87 | ||
74 | * Probe / marker example | 88 | * Probe / marker example |
75 | 89 | ||
diff --git a/Documentation/tracepoints.txt b/Documentation/tracepoints.txt index 5d354e167494..2d42241a25c3 100644 --- a/Documentation/tracepoints.txt +++ b/Documentation/tracepoints.txt | |||
@@ -3,28 +3,30 @@ | |||
3 | Mathieu Desnoyers | 3 | Mathieu Desnoyers |
4 | 4 | ||
5 | 5 | ||
6 | This document introduces Linux Kernel Tracepoints and their use. It provides | 6 | This document introduces Linux Kernel Tracepoints and their use. It |
7 | examples of how to insert tracepoints in the kernel and connect probe functions | 7 | provides examples of how to insert tracepoints in the kernel and |
8 | to them and provides some examples of probe functions. | 8 | connect probe functions to them and provides some examples of probe |
9 | functions. | ||
9 | 10 | ||
10 | 11 | ||
11 | * Purpose of tracepoints | 12 | * Purpose of tracepoints |
12 | 13 | ||
13 | A tracepoint placed in code provides a hook to call a function (probe) that you | 14 | A tracepoint placed in code provides a hook to call a function (probe) |
14 | can provide at runtime. A tracepoint can be "on" (a probe is connected to it) or | 15 | that you can provide at runtime. A tracepoint can be "on" (a probe is |
15 | "off" (no probe is attached). When a tracepoint is "off" it has no effect, | 16 | connected to it) or "off" (no probe is attached). When a tracepoint is |
16 | except for adding a tiny time penalty (checking a condition for a branch) and | 17 | "off" it has no effect, except for adding a tiny time penalty |
17 | space penalty (adding a few bytes for the function call at the end of the | 18 | (checking a condition for a branch) and space penalty (adding a few |
18 | instrumented function and adds a data structure in a separate section). When a | 19 | bytes for the function call at the end of the instrumented function |
19 | tracepoint is "on", the function you provide is called each time the tracepoint | 20 | and adds a data structure in a separate section). When a tracepoint |
20 | is executed, in the execution context of the caller. When the function provided | 21 | is "on", the function you provide is called each time the tracepoint |
21 | ends its execution, it returns to the caller (continuing from the tracepoint | 22 | is executed, in the execution context of the caller. When the function |
22 | site). | 23 | provided ends its execution, it returns to the caller (continuing from |
24 | the tracepoint site). | ||
23 | 25 | ||
24 | You can put tracepoints at important locations in the code. They are | 26 | You can put tracepoints at important locations in the code. They are |
25 | lightweight hooks that can pass an arbitrary number of parameters, | 27 | lightweight hooks that can pass an arbitrary number of parameters, |
26 | which prototypes are described in a tracepoint declaration placed in a header | 28 | which prototypes are described in a tracepoint declaration placed in a |
27 | file. | 29 | header file. |
28 | 30 | ||
29 | They can be used for tracing and performance accounting. | 31 | They can be used for tracing and performance accounting. |
30 | 32 | ||
@@ -42,7 +44,7 @@ In include/trace/subsys.h : | |||
42 | 44 | ||
43 | #include <linux/tracepoint.h> | 45 | #include <linux/tracepoint.h> |
44 | 46 | ||
45 | DEFINE_TRACE(subsys_eventname, | 47 | DECLARE_TRACE(subsys_eventname, |
46 | TPPROTO(int firstarg, struct task_struct *p), | 48 | TPPROTO(int firstarg, struct task_struct *p), |
47 | TPARGS(firstarg, p)); | 49 | TPARGS(firstarg, p)); |
48 | 50 | ||
@@ -50,6 +52,8 @@ In subsys/file.c (where the tracing statement must be added) : | |||
50 | 52 | ||
51 | #include <trace/subsys.h> | 53 | #include <trace/subsys.h> |
52 | 54 | ||
55 | DEFINE_TRACE(subsys_eventname); | ||
56 | |||
53 | void somefct(void) | 57 | void somefct(void) |
54 | { | 58 | { |
55 | ... | 59 | ... |
@@ -61,31 +65,41 @@ Where : | |||
61 | - subsys_eventname is an identifier unique to your event | 65 | - subsys_eventname is an identifier unique to your event |
62 | - subsys is the name of your subsystem. | 66 | - subsys is the name of your subsystem. |
63 | - eventname is the name of the event to trace. | 67 | - eventname is the name of the event to trace. |
64 | - TPPROTO(int firstarg, struct task_struct *p) is the prototype of the function | ||
65 | called by this tracepoint. | ||
66 | - TPARGS(firstarg, p) are the parameters names, same as found in the prototype. | ||
67 | 68 | ||
68 | Connecting a function (probe) to a tracepoint is done by providing a probe | 69 | - TPPROTO(int firstarg, struct task_struct *p) is the prototype of the |
69 | (function to call) for the specific tracepoint through | 70 | function called by this tracepoint. |
70 | register_trace_subsys_eventname(). Removing a probe is done through | ||
71 | unregister_trace_subsys_eventname(); it will remove the probe sure there is no | ||
72 | caller left using the probe when it returns. Probe removal is preempt-safe | ||
73 | because preemption is disabled around the probe call. See the "Probe example" | ||
74 | section below for a sample probe module. | ||
75 | |||
76 | The tracepoint mechanism supports inserting multiple instances of the same | ||
77 | tracepoint, but a single definition must be made of a given tracepoint name over | ||
78 | all the kernel to make sure no type conflict will occur. Name mangling of the | ||
79 | tracepoints is done using the prototypes to make sure typing is correct. | ||
80 | Verification of probe type correctness is done at the registration site by the | ||
81 | compiler. Tracepoints can be put in inline functions, inlined static functions, | ||
82 | and unrolled loops as well as regular functions. | ||
83 | |||
84 | The naming scheme "subsys_event" is suggested here as a convention intended | ||
85 | to limit collisions. Tracepoint names are global to the kernel: they are | ||
86 | considered as being the same whether they are in the core kernel image or in | ||
87 | modules. | ||
88 | 71 | ||
72 | - TPARGS(firstarg, p) are the parameters names, same as found in the | ||
73 | prototype. | ||
74 | |||
75 | Connecting a function (probe) to a tracepoint is done by providing a | ||
76 | probe (function to call) for the specific tracepoint through | ||
77 | register_trace_subsys_eventname(). Removing a probe is done through | ||
78 | unregister_trace_subsys_eventname(); it will remove the probe. | ||
79 | |||
80 | tracepoint_synchronize_unregister() must be called before the end of | ||
81 | the module exit function to make sure there is no caller left using | ||
82 | the probe. This, and the fact that preemption is disabled around the | ||
83 | probe call, make sure that probe removal and module unload are safe. | ||
84 | See the "Probe example" section below for a sample probe module. | ||
85 | |||
86 | The tracepoint mechanism supports inserting multiple instances of the | ||
87 | same tracepoint, but a single definition must be made of a given | ||
88 | tracepoint name over all the kernel to make sure no type conflict will | ||
89 | occur. Name mangling of the tracepoints is done using the prototypes | ||
90 | to make sure typing is correct. Verification of probe type correctness | ||
91 | is done at the registration site by the compiler. Tracepoints can be | ||
92 | put in inline functions, inlined static functions, and unrolled loops | ||
93 | as well as regular functions. | ||
94 | |||
95 | The naming scheme "subsys_event" is suggested here as a convention | ||
96 | intended to limit collisions. Tracepoint names are global to the | ||
97 | kernel: they are considered as being the same whether they are in the | ||
98 | core kernel image or in modules. | ||
99 | |||
100 | If the tracepoint has to be used in kernel modules, an | ||
101 | EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be | ||
102 | used to export the defined tracepoints. | ||
89 | 103 | ||
90 | * Probe / tracepoint example | 104 | * Probe / tracepoint example |
91 | 105 | ||
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9b6a1fa19e70..2bb43b433e07 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -17,6 +17,14 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) | |||
17 | */ | 17 | */ |
18 | return addr - 1; | 18 | return addr - 1; |
19 | } | 19 | } |
20 | |||
21 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
22 | |||
23 | struct dyn_arch_ftrace { | ||
24 | /* No extra data needed for x86 */ | ||
25 | }; | ||
26 | |||
27 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
20 | #endif /* __ASSEMBLY__ */ | 28 | #endif /* __ASSEMBLY__ */ |
21 | #endif /* CONFIG_FUNCTION_TRACER */ | 29 | #endif /* CONFIG_FUNCTION_TRACER */ |
22 | 30 | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index f97621149839..74defe21ba42 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1190,7 +1190,7 @@ ENTRY(mcount) | |||
1190 | jnz trace | 1190 | jnz trace |
1191 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1191 | #ifdef CONFIG_FUNCTION_RET_TRACER |
1192 | cmpl $ftrace_stub, ftrace_function_return | 1192 | cmpl $ftrace_stub, ftrace_function_return |
1193 | jnz trace_return | 1193 | jnz ftrace_return_caller |
1194 | #endif | 1194 | #endif |
1195 | .globl ftrace_stub | 1195 | .globl ftrace_stub |
1196 | ftrace_stub: | 1196 | ftrace_stub: |
@@ -1211,9 +1211,15 @@ trace: | |||
1211 | popl %ecx | 1211 | popl %ecx |
1212 | popl %eax | 1212 | popl %eax |
1213 | jmp ftrace_stub | 1213 | jmp ftrace_stub |
1214 | END(mcount) | ||
1215 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
1216 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
1214 | 1217 | ||
1215 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1218 | #ifdef CONFIG_FUNCTION_RET_TRACER |
1216 | trace_return: | 1219 | ENTRY(ftrace_return_caller) |
1220 | cmpl $0, function_trace_stop | ||
1221 | jne ftrace_stub | ||
1222 | |||
1217 | pushl %eax | 1223 | pushl %eax |
1218 | pushl %ecx | 1224 | pushl %ecx |
1219 | pushl %edx | 1225 | pushl %edx |
@@ -1223,7 +1229,8 @@ trace_return: | |||
1223 | popl %edx | 1229 | popl %edx |
1224 | popl %ecx | 1230 | popl %ecx |
1225 | popl %eax | 1231 | popl %eax |
1226 | jmp ftrace_stub | 1232 | ret |
1233 | END(ftrace_return_caller) | ||
1227 | 1234 | ||
1228 | .globl return_to_handler | 1235 | .globl return_to_handler |
1229 | return_to_handler: | 1236 | return_to_handler: |
@@ -1237,10 +1244,7 @@ return_to_handler: | |||
1237 | popl %ecx | 1244 | popl %ecx |
1238 | popl %eax | 1245 | popl %eax |
1239 | ret | 1246 | ret |
1240 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | 1247 | #endif |
1241 | END(mcount) | ||
1242 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
1243 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
1244 | 1248 | ||
1245 | .section .rodata,"a" | 1249 | .section .rodata,"a" |
1246 | #include "syscall_table_32.S" | 1250 | #include "syscall_table_32.S" |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index fe832738e1e2..924153edd973 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -24,133 +24,6 @@ | |||
24 | #include <asm/nmi.h> | 24 | #include <asm/nmi.h> |
25 | 25 | ||
26 | 26 | ||
27 | |||
28 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
29 | |||
30 | /* | ||
31 | * These functions are picked from those used on | ||
32 | * this page for dynamic ftrace. They have been | ||
33 | * simplified to ignore all traces in NMI context. | ||
34 | */ | ||
35 | static atomic_t in_nmi; | ||
36 | |||
37 | void ftrace_nmi_enter(void) | ||
38 | { | ||
39 | atomic_inc(&in_nmi); | ||
40 | } | ||
41 | |||
42 | void ftrace_nmi_exit(void) | ||
43 | { | ||
44 | atomic_dec(&in_nmi); | ||
45 | } | ||
46 | |||
47 | /* Add a function return address to the trace stack on thread info.*/ | ||
48 | static int push_return_trace(unsigned long ret, unsigned long long time, | ||
49 | unsigned long func) | ||
50 | { | ||
51 | int index; | ||
52 | struct thread_info *ti = current_thread_info(); | ||
53 | |||
54 | /* The return trace stack is full */ | ||
55 | if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) | ||
56 | return -EBUSY; | ||
57 | |||
58 | index = ++ti->curr_ret_stack; | ||
59 | ti->ret_stack[index].ret = ret; | ||
60 | ti->ret_stack[index].func = func; | ||
61 | ti->ret_stack[index].calltime = time; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /* Retrieve a function return address to the trace stack on thread info.*/ | ||
67 | static void pop_return_trace(unsigned long *ret, unsigned long long *time, | ||
68 | unsigned long *func) | ||
69 | { | ||
70 | int index; | ||
71 | |||
72 | struct thread_info *ti = current_thread_info(); | ||
73 | index = ti->curr_ret_stack; | ||
74 | *ret = ti->ret_stack[index].ret; | ||
75 | *func = ti->ret_stack[index].func; | ||
76 | *time = ti->ret_stack[index].calltime; | ||
77 | ti->curr_ret_stack--; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Send the trace to the ring-buffer. | ||
82 | * @return the original return address. | ||
83 | */ | ||
84 | unsigned long ftrace_return_to_handler(void) | ||
85 | { | ||
86 | struct ftrace_retfunc trace; | ||
87 | pop_return_trace(&trace.ret, &trace.calltime, &trace.func); | ||
88 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
89 | ftrace_function_return(&trace); | ||
90 | |||
91 | return trace.ret; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Hook the return address and push it in the stack of return addrs | ||
96 | * in current thread info. | ||
97 | */ | ||
98 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
99 | { | ||
100 | unsigned long old; | ||
101 | unsigned long long calltime; | ||
102 | int faulted; | ||
103 | unsigned long return_hooker = (unsigned long) | ||
104 | &return_to_handler; | ||
105 | |||
106 | /* Nmi's are currently unsupported */ | ||
107 | if (atomic_read(&in_nmi)) | ||
108 | return; | ||
109 | |||
110 | /* | ||
111 | * Protect against fault, even if it shouldn't | ||
112 | * happen. This tool is too much intrusive to | ||
113 | * ignore such a protection. | ||
114 | */ | ||
115 | asm volatile( | ||
116 | "1: movl (%[parent_old]), %[old]\n" | ||
117 | "2: movl %[return_hooker], (%[parent_replaced])\n" | ||
118 | " movl $0, %[faulted]\n" | ||
119 | |||
120 | ".section .fixup, \"ax\"\n" | ||
121 | "3: movl $1, %[faulted]\n" | ||
122 | ".previous\n" | ||
123 | |||
124 | ".section __ex_table, \"a\"\n" | ||
125 | " .long 1b, 3b\n" | ||
126 | " .long 2b, 3b\n" | ||
127 | ".previous\n" | ||
128 | |||
129 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | ||
130 | [faulted] "=r" (faulted) | ||
131 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | ||
132 | : "memory" | ||
133 | ); | ||
134 | |||
135 | if (WARN_ON(faulted)) { | ||
136 | unregister_ftrace_return(); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | if (WARN_ON(!__kernel_text_address(old))) { | ||
141 | unregister_ftrace_return(); | ||
142 | *parent = old; | ||
143 | return; | ||
144 | } | ||
145 | |||
146 | calltime = cpu_clock(raw_smp_processor_id()); | ||
147 | |||
148 | if (push_return_trace(old, calltime, self_addr) == -EBUSY) | ||
149 | *parent = old; | ||
150 | } | ||
151 | |||
152 | #endif | ||
153 | |||
154 | #ifdef CONFIG_DYNAMIC_FTRACE | 27 | #ifdef CONFIG_DYNAMIC_FTRACE |
155 | 28 | ||
156 | union ftrace_code_union { | 29 | union ftrace_code_union { |
@@ -166,7 +39,7 @@ static int ftrace_calc_offset(long ip, long addr) | |||
166 | return (int)(addr - ip); | 39 | return (int)(addr - ip); |
167 | } | 40 | } |
168 | 41 | ||
169 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | 42 | static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) |
170 | { | 43 | { |
171 | static union ftrace_code_union calc; | 44 | static union ftrace_code_union calc; |
172 | 45 | ||
@@ -311,12 +184,12 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
311 | 184 | ||
312 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | 185 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; |
313 | 186 | ||
314 | unsigned char *ftrace_nop_replace(void) | 187 | static unsigned char *ftrace_nop_replace(void) |
315 | { | 188 | { |
316 | return ftrace_nop; | 189 | return ftrace_nop; |
317 | } | 190 | } |
318 | 191 | ||
319 | int | 192 | static int |
320 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 193 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
321 | unsigned char *new_code) | 194 | unsigned char *new_code) |
322 | { | 195 | { |
@@ -349,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
349 | return 0; | 222 | return 0; |
350 | } | 223 | } |
351 | 224 | ||
225 | int ftrace_make_nop(struct module *mod, | ||
226 | struct dyn_ftrace *rec, unsigned long addr) | ||
227 | { | ||
228 | unsigned char *new, *old; | ||
229 | unsigned long ip = rec->ip; | ||
230 | |||
231 | old = ftrace_call_replace(ip, addr); | ||
232 | new = ftrace_nop_replace(); | ||
233 | |||
234 | return ftrace_modify_code(rec->ip, old, new); | ||
235 | } | ||
236 | |||
237 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
238 | { | ||
239 | unsigned char *new, *old; | ||
240 | unsigned long ip = rec->ip; | ||
241 | |||
242 | old = ftrace_nop_replace(); | ||
243 | new = ftrace_call_replace(ip, addr); | ||
244 | |||
245 | return ftrace_modify_code(rec->ip, old, new); | ||
246 | } | ||
247 | |||
352 | int ftrace_update_ftrace_func(ftrace_func_t func) | 248 | int ftrace_update_ftrace_func(ftrace_func_t func) |
353 | { | 249 | { |
354 | unsigned long ip = (unsigned long)(&ftrace_call); | 250 | unsigned long ip = (unsigned long)(&ftrace_call); |
@@ -426,3 +322,133 @@ int __init ftrace_dyn_arch_init(void *data) | |||
426 | return 0; | 322 | return 0; |
427 | } | 323 | } |
428 | #endif | 324 | #endif |
325 | |||
326 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
327 | |||
328 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
329 | |||
330 | /* | ||
331 | * These functions are picked from those used on | ||
332 | * this page for dynamic ftrace. They have been | ||
333 | * simplified to ignore all traces in NMI context. | ||
334 | */ | ||
335 | static atomic_t in_nmi; | ||
336 | |||
337 | void ftrace_nmi_enter(void) | ||
338 | { | ||
339 | atomic_inc(&in_nmi); | ||
340 | } | ||
341 | |||
342 | void ftrace_nmi_exit(void) | ||
343 | { | ||
344 | atomic_dec(&in_nmi); | ||
345 | } | ||
346 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | ||
347 | |||
348 | /* Add a function return address to the trace stack on thread info.*/ | ||
349 | static int push_return_trace(unsigned long ret, unsigned long long time, | ||
350 | unsigned long func) | ||
351 | { | ||
352 | int index; | ||
353 | struct thread_info *ti = current_thread_info(); | ||
354 | |||
355 | /* The return trace stack is full */ | ||
356 | if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) | ||
357 | return -EBUSY; | ||
358 | |||
359 | index = ++ti->curr_ret_stack; | ||
360 | barrier(); | ||
361 | ti->ret_stack[index].ret = ret; | ||
362 | ti->ret_stack[index].func = func; | ||
363 | ti->ret_stack[index].calltime = time; | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | /* Retrieve a function return address to the trace stack on thread info.*/ | ||
369 | static void pop_return_trace(unsigned long *ret, unsigned long long *time, | ||
370 | unsigned long *func) | ||
371 | { | ||
372 | int index; | ||
373 | |||
374 | struct thread_info *ti = current_thread_info(); | ||
375 | index = ti->curr_ret_stack; | ||
376 | *ret = ti->ret_stack[index].ret; | ||
377 | *func = ti->ret_stack[index].func; | ||
378 | *time = ti->ret_stack[index].calltime; | ||
379 | ti->curr_ret_stack--; | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Send the trace to the ring-buffer. | ||
384 | * @return the original return address. | ||
385 | */ | ||
386 | unsigned long ftrace_return_to_handler(void) | ||
387 | { | ||
388 | struct ftrace_retfunc trace; | ||
389 | pop_return_trace(&trace.ret, &trace.calltime, &trace.func); | ||
390 | trace.rettime = cpu_clock(raw_smp_processor_id()); | ||
391 | ftrace_function_return(&trace); | ||
392 | |||
393 | return trace.ret; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Hook the return address and push it in the stack of return addrs | ||
398 | * in current thread info. | ||
399 | */ | ||
400 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
401 | { | ||
402 | unsigned long old; | ||
403 | unsigned long long calltime; | ||
404 | int faulted; | ||
405 | unsigned long return_hooker = (unsigned long) | ||
406 | &return_to_handler; | ||
407 | |||
408 | /* Nmi's are currently unsupported */ | ||
409 | if (atomic_read(&in_nmi)) | ||
410 | return; | ||
411 | |||
412 | /* | ||
413 | * Protect against fault, even if it shouldn't | ||
414 | * happen. This tool is too much intrusive to | ||
415 | * ignore such a protection. | ||
416 | */ | ||
417 | asm volatile( | ||
418 | "1: movl (%[parent_old]), %[old]\n" | ||
419 | "2: movl %[return_hooker], (%[parent_replaced])\n" | ||
420 | " movl $0, %[faulted]\n" | ||
421 | |||
422 | ".section .fixup, \"ax\"\n" | ||
423 | "3: movl $1, %[faulted]\n" | ||
424 | ".previous\n" | ||
425 | |||
426 | ".section __ex_table, \"a\"\n" | ||
427 | " .long 1b, 3b\n" | ||
428 | " .long 2b, 3b\n" | ||
429 | ".previous\n" | ||
430 | |||
431 | : [parent_replaced] "=r" (parent), [old] "=r" (old), | ||
432 | [faulted] "=r" (faulted) | ||
433 | : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker) | ||
434 | : "memory" | ||
435 | ); | ||
436 | |||
437 | if (WARN_ON(faulted)) { | ||
438 | unregister_ftrace_return(); | ||
439 | return; | ||
440 | } | ||
441 | |||
442 | if (WARN_ON(!__kernel_text_address(old))) { | ||
443 | unregister_ftrace_return(); | ||
444 | *parent = old; | ||
445 | return; | ||
446 | } | ||
447 | |||
448 | calltime = cpu_clock(raw_smp_processor_id()); | ||
449 | |||
450 | if (push_return_trace(old, calltime, self_addr) == -EBUSY) | ||
451 | *parent = old; | ||
452 | } | ||
453 | |||
454 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a5e4ed9baec8..3b46ae464933 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -71,6 +71,7 @@ | |||
71 | VMLINUX_SYMBOL(__start___markers) = .; \ | 71 | VMLINUX_SYMBOL(__start___markers) = .; \ |
72 | *(__markers) \ | 72 | *(__markers) \ |
73 | VMLINUX_SYMBOL(__stop___markers) = .; \ | 73 | VMLINUX_SYMBOL(__stop___markers) = .; \ |
74 | . = ALIGN(32); \ | ||
74 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ | 75 | VMLINUX_SYMBOL(__start___tracepoints) = .; \ |
75 | *(__tracepoints) \ | 76 | *(__tracepoints) \ |
76 | VMLINUX_SYMBOL(__stop___tracepoints) = .; \ | 77 | VMLINUX_SYMBOL(__stop___tracepoints) = .; \ |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 4fbc4a8b86a5..f1af1aab00e6 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -25,6 +25,17 @@ struct ftrace_ops { | |||
25 | 25 | ||
26 | extern int function_trace_stop; | 26 | extern int function_trace_stop; |
27 | 27 | ||
28 | /* | ||
29 | * Type of the current tracing. | ||
30 | */ | ||
31 | enum ftrace_tracing_type_t { | ||
32 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ | ||
33 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ | ||
34 | }; | ||
35 | |||
36 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | ||
37 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | ||
38 | |||
28 | /** | 39 | /** |
29 | * ftrace_stop - stop function tracer. | 40 | * ftrace_stop - stop function tracer. |
30 | * | 41 | * |
@@ -74,6 +85,9 @@ static inline void ftrace_start(void) { } | |||
74 | #endif /* CONFIG_FUNCTION_TRACER */ | 85 | #endif /* CONFIG_FUNCTION_TRACER */ |
75 | 86 | ||
76 | #ifdef CONFIG_DYNAMIC_FTRACE | 87 | #ifdef CONFIG_DYNAMIC_FTRACE |
88 | /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ | ||
89 | #include <asm/ftrace.h> | ||
90 | |||
77 | enum { | 91 | enum { |
78 | FTRACE_FL_FREE = (1 << 0), | 92 | FTRACE_FL_FREE = (1 << 0), |
79 | FTRACE_FL_FAILED = (1 << 1), | 93 | FTRACE_FL_FAILED = (1 << 1), |
@@ -88,6 +102,7 @@ struct dyn_ftrace { | |||
88 | struct list_head list; | 102 | struct list_head list; |
89 | unsigned long ip; /* address of mcount call-site */ | 103 | unsigned long ip; /* address of mcount call-site */ |
90 | unsigned long flags; | 104 | unsigned long flags; |
105 | struct dyn_arch_ftrace arch; | ||
91 | }; | 106 | }; |
92 | 107 | ||
93 | int ftrace_force_update(void); | 108 | int ftrace_force_update(void); |
@@ -95,22 +110,43 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset); | |||
95 | 110 | ||
96 | /* defined in arch */ | 111 | /* defined in arch */ |
97 | extern int ftrace_ip_converted(unsigned long ip); | 112 | extern int ftrace_ip_converted(unsigned long ip); |
98 | extern unsigned char *ftrace_nop_replace(void); | ||
99 | extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); | ||
100 | extern int ftrace_dyn_arch_init(void *data); | 113 | extern int ftrace_dyn_arch_init(void *data); |
101 | extern int ftrace_update_ftrace_func(ftrace_func_t func); | 114 | extern int ftrace_update_ftrace_func(ftrace_func_t func); |
102 | extern void ftrace_caller(void); | 115 | extern void ftrace_caller(void); |
103 | extern void ftrace_call(void); | 116 | extern void ftrace_call(void); |
104 | extern void mcount_call(void); | 117 | extern void mcount_call(void); |
118 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
119 | extern void ftrace_return_caller(void); | ||
120 | #endif | ||
105 | 121 | ||
106 | /* May be defined in arch */ | 122 | /** |
107 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | 123 | * ftrace_make_nop - convert code into nop |
124 | * @mod: module structure if called by module load initialization | ||
125 | * @rec: the mcount call site record | ||
126 | * @addr: the address that the call site should be calling | ||
127 | * | ||
128 | * This is a very sensitive operation and great care needs | ||
129 | * to be taken by the arch. The operation should carefully | ||
130 | * read the location, check to see if what is read is indeed | ||
131 | * what we expect it to be, and then on success of the compare, | ||
132 | * it should write to the location. | ||
133 | * | ||
134 | * The code segment at @rec->ip should be a caller to @addr | ||
135 | * | ||
136 | * Return must be: | ||
137 | * 0 on success | ||
138 | * -EFAULT on error reading the location | ||
139 | * -EINVAL on a failed compare of the contents | ||
140 | * -EPERM on error writing to the location | ||
141 | * Any other value will be considered a failure. | ||
142 | */ | ||
143 | extern int ftrace_make_nop(struct module *mod, | ||
144 | struct dyn_ftrace *rec, unsigned long addr); | ||
108 | 145 | ||
109 | /** | 146 | /** |
110 | * ftrace_modify_code - modify code segment | 147 | * ftrace_make_call - convert a nop call site into a call to addr |
111 | * @ip: the address of the code segment | 148 | * @rec: the mcount call site record |
112 | * @old_code: the contents of what is expected to be there | 149 | * @addr: the address that the call site should call |
113 | * @new_code: the code to patch in | ||
114 | * | 150 | * |
115 | * This is a very sensitive operation and great care needs | 151 | * This is a very sensitive operation and great care needs |
116 | * to be taken by the arch. The operation should carefully | 152 | * to be taken by the arch. The operation should carefully |
@@ -118,6 +154,8 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); | |||
118 | * what we expect it to be, and then on success of the compare, | 154 | * what we expect it to be, and then on success of the compare, |
119 | * it should write to the location. | 155 | * it should write to the location. |
120 | * | 156 | * |
157 | * The code segment at @rec->ip should be a nop | ||
158 | * | ||
121 | * Return must be: | 159 | * Return must be: |
122 | * 0 on success | 160 | * 0 on success |
123 | * -EFAULT on error reading the location | 161 | * -EFAULT on error reading the location |
@@ -125,8 +163,11 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); | |||
125 | * -EPERM on error writing to the location | 163 | * -EPERM on error writing to the location |
126 | * Any other value will be considered a failure. | 164 | * Any other value will be considered a failure. |
127 | */ | 165 | */ |
128 | extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 166 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); |
129 | unsigned char *new_code); | 167 | |
168 | |||
169 | /* May be defined in arch */ | ||
170 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | ||
130 | 171 | ||
131 | extern int skip_trace(unsigned long ip); | 172 | extern int skip_trace(unsigned long ip); |
132 | 173 | ||
@@ -259,11 +300,13 @@ static inline void ftrace_dump(void) { } | |||
259 | 300 | ||
260 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 301 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
261 | extern void ftrace_init(void); | 302 | extern void ftrace_init(void); |
262 | extern void ftrace_init_module(unsigned long *start, unsigned long *end); | 303 | extern void ftrace_init_module(struct module *mod, |
304 | unsigned long *start, unsigned long *end); | ||
263 | #else | 305 | #else |
264 | static inline void ftrace_init(void) { } | 306 | static inline void ftrace_init(void) { } |
265 | static inline void | 307 | static inline void |
266 | ftrace_init_module(unsigned long *start, unsigned long *end) { } | 308 | ftrace_init_module(struct module *mod, |
309 | unsigned long *start, unsigned long *end) { } | ||
267 | #endif | 310 | #endif |
268 | 311 | ||
269 | 312 | ||
@@ -281,7 +324,7 @@ struct ftrace_retfunc { | |||
281 | /* Type of a callback handler of tracing return function */ | 324 | /* Type of a callback handler of tracing return function */ |
282 | typedef void (*trace_function_return_t)(struct ftrace_retfunc *); | 325 | typedef void (*trace_function_return_t)(struct ftrace_retfunc *); |
283 | 326 | ||
284 | extern void register_ftrace_return(trace_function_return_t func); | 327 | extern int register_ftrace_return(trace_function_return_t func); |
285 | /* The current handler in use */ | 328 | /* The current handler in use */ |
286 | extern trace_function_return_t ftrace_function_return; | 329 | extern trace_function_return_t ftrace_function_return; |
287 | extern void unregister_ftrace_return(void); | 330 | extern void unregister_ftrace_return(void); |
diff --git a/include/linux/marker.h b/include/linux/marker.h index 4cf45472d9f5..34c14bc957f5 100644 --- a/include/linux/marker.h +++ b/include/linux/marker.h | |||
@@ -12,6 +12,7 @@ | |||
12 | * See the file COPYING for more details. | 12 | * See the file COPYING for more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <stdarg.h> | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
16 | 17 | ||
17 | struct module; | 18 | struct module; |
@@ -48,10 +49,28 @@ struct marker { | |||
48 | void (*call)(const struct marker *mdata, void *call_private, ...); | 49 | void (*call)(const struct marker *mdata, void *call_private, ...); |
49 | struct marker_probe_closure single; | 50 | struct marker_probe_closure single; |
50 | struct marker_probe_closure *multi; | 51 | struct marker_probe_closure *multi; |
52 | const char *tp_name; /* Optional tracepoint name */ | ||
53 | void *tp_cb; /* Optional tracepoint callback */ | ||
51 | } __attribute__((aligned(8))); | 54 | } __attribute__((aligned(8))); |
52 | 55 | ||
53 | #ifdef CONFIG_MARKERS | 56 | #ifdef CONFIG_MARKERS |
54 | 57 | ||
58 | #define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \ | ||
59 | static const char __mstrtab_##name[] \ | ||
60 | __attribute__((section("__markers_strings"))) \ | ||
61 | = #name "\0" format; \ | ||
62 | static struct marker __mark_##name \ | ||
63 | __attribute__((section("__markers"), aligned(8))) = \ | ||
64 | { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \ | ||
65 | 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\ | ||
66 | NULL, tp_name_str, tp_cb } | ||
67 | |||
68 | #define DEFINE_MARKER(name, format) \ | ||
69 | _DEFINE_MARKER(name, NULL, NULL, format) | ||
70 | |||
71 | #define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \ | ||
72 | _DEFINE_MARKER(name, #tp_name, tp_cb, format) | ||
73 | |||
55 | /* | 74 | /* |
56 | * Note : the empty asm volatile with read constraint is used here instead of a | 75 | * Note : the empty asm volatile with read constraint is used here instead of a |
57 | * "used" attribute to fix a gcc 4.1.x bug. | 76 | * "used" attribute to fix a gcc 4.1.x bug. |
@@ -65,14 +84,7 @@ struct marker { | |||
65 | */ | 84 | */ |
66 | #define __trace_mark(generic, name, call_private, format, args...) \ | 85 | #define __trace_mark(generic, name, call_private, format, args...) \ |
67 | do { \ | 86 | do { \ |
68 | static const char __mstrtab_##name[] \ | 87 | DEFINE_MARKER(name, format); \ |
69 | __attribute__((section("__markers_strings"))) \ | ||
70 | = #name "\0" format; \ | ||
71 | static struct marker __mark_##name \ | ||
72 | __attribute__((section("__markers"), aligned(8))) = \ | ||
73 | { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \ | ||
74 | 0, 0, marker_probe_cb, \ | ||
75 | { __mark_empty_function, NULL}, NULL }; \ | ||
76 | __mark_check_format(format, ## args); \ | 88 | __mark_check_format(format, ## args); \ |
77 | if (unlikely(__mark_##name.state)) { \ | 89 | if (unlikely(__mark_##name.state)) { \ |
78 | (*__mark_##name.call) \ | 90 | (*__mark_##name.call) \ |
@@ -80,14 +92,39 @@ struct marker { | |||
80 | } \ | 92 | } \ |
81 | } while (0) | 93 | } while (0) |
82 | 94 | ||
95 | #define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ | ||
96 | do { \ | ||
97 | void __check_tp_type(void) \ | ||
98 | { \ | ||
99 | register_trace_##tp_name(tp_cb); \ | ||
100 | } \ | ||
101 | DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \ | ||
102 | __mark_check_format(format, ## args); \ | ||
103 | (*__mark_##name.call)(&__mark_##name, call_private, \ | ||
104 | ## args); \ | ||
105 | } while (0) | ||
106 | |||
83 | extern void marker_update_probe_range(struct marker *begin, | 107 | extern void marker_update_probe_range(struct marker *begin, |
84 | struct marker *end); | 108 | struct marker *end); |
109 | |||
110 | #define GET_MARKER(name) (__mark_##name) | ||
111 | |||
85 | #else /* !CONFIG_MARKERS */ | 112 | #else /* !CONFIG_MARKERS */ |
113 | #define DEFINE_MARKER(name, tp_name, tp_cb, format) | ||
86 | #define __trace_mark(generic, name, call_private, format, args...) \ | 114 | #define __trace_mark(generic, name, call_private, format, args...) \ |
87 | __mark_check_format(format, ## args) | 115 | __mark_check_format(format, ## args) |
116 | #define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ | ||
117 | do { \ | ||
118 | void __check_tp_type(void) \ | ||
119 | { \ | ||
120 | register_trace_##tp_name(tp_cb); \ | ||
121 | } \ | ||
122 | __mark_check_format(format, ## args); \ | ||
123 | } while (0) | ||
88 | static inline void marker_update_probe_range(struct marker *begin, | 124 | static inline void marker_update_probe_range(struct marker *begin, |
89 | struct marker *end) | 125 | struct marker *end) |
90 | { } | 126 | { } |
127 | #define GET_MARKER(name) | ||
91 | #endif /* CONFIG_MARKERS */ | 128 | #endif /* CONFIG_MARKERS */ |
92 | 129 | ||
93 | /** | 130 | /** |
@@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin, | |||
117 | __trace_mark(1, name, NULL, format, ## args) | 154 | __trace_mark(1, name, NULL, format, ## args) |
118 | 155 | ||
119 | /** | 156 | /** |
157 | * trace_mark_tp - Marker in a tracepoint callback | ||
158 | * @name: marker name, not quoted. | ||
159 | * @tp_name: tracepoint name, not quoted. | ||
160 | * @tp_cb: tracepoint callback. Should have an associated global symbol so it | ||
161 | * is not optimized away by the compiler (should not be static). | ||
162 | * @format: format string | ||
163 | * @args...: variable argument list | ||
164 | * | ||
165 | * Places a marker in a tracepoint callback. | ||
166 | */ | ||
167 | #define trace_mark_tp(name, tp_name, tp_cb, format, args...) \ | ||
168 | __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args) | ||
169 | |||
170 | /** | ||
120 | * MARK_NOARGS - Format string for a marker with no argument. | 171 | * MARK_NOARGS - Format string for a marker with no argument. |
121 | */ | 172 | */ |
122 | #define MARK_NOARGS " " | 173 | #define MARK_NOARGS " " |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 86f1f5e43e33..895dc9c1088c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -142,6 +142,7 @@ struct rcu_head { | |||
142 | * on the write-side to insure proper synchronization. | 142 | * on the write-side to insure proper synchronization. |
143 | */ | 143 | */ |
144 | #define rcu_read_lock_sched() preempt_disable() | 144 | #define rcu_read_lock_sched() preempt_disable() |
145 | #define rcu_read_lock_sched_notrace() preempt_disable_notrace() | ||
145 | 146 | ||
146 | /* | 147 | /* |
147 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section | 148 | * rcu_read_unlock_sched - marks the end of a RCU-classic critical section |
@@ -149,6 +150,7 @@ struct rcu_head { | |||
149 | * See rcu_read_lock_sched for more information. | 150 | * See rcu_read_lock_sched for more information. |
150 | */ | 151 | */ |
151 | #define rcu_read_unlock_sched() preempt_enable() | 152 | #define rcu_read_unlock_sched() preempt_enable() |
153 | #define rcu_read_unlock_sched_notrace() preempt_enable_notrace() | ||
152 | 154 | ||
153 | 155 | ||
154 | 156 | ||
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 63064e9403f2..757005458366 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -24,8 +24,12 @@ struct tracepoint { | |||
24 | const char *name; /* Tracepoint name */ | 24 | const char *name; /* Tracepoint name */ |
25 | int state; /* State. */ | 25 | int state; /* State. */ |
26 | void **funcs; | 26 | void **funcs; |
27 | } __attribute__((aligned(8))); | 27 | } __attribute__((aligned(32))); /* |
28 | 28 | * Aligned on 32 bytes because it is | |
29 | * globally visible and gcc happily | ||
30 | * align these on the structure size. | ||
31 | * Keep in sync with vmlinux.lds.h. | ||
32 | */ | ||
29 | 33 | ||
30 | #define TPPROTO(args...) args | 34 | #define TPPROTO(args...) args |
31 | #define TPARGS(args...) args | 35 | #define TPARGS(args...) args |
@@ -40,14 +44,14 @@ struct tracepoint { | |||
40 | do { \ | 44 | do { \ |
41 | void **it_func; \ | 45 | void **it_func; \ |
42 | \ | 46 | \ |
43 | rcu_read_lock_sched(); \ | 47 | rcu_read_lock_sched_notrace(); \ |
44 | it_func = rcu_dereference((tp)->funcs); \ | 48 | it_func = rcu_dereference((tp)->funcs); \ |
45 | if (it_func) { \ | 49 | if (it_func) { \ |
46 | do { \ | 50 | do { \ |
47 | ((void(*)(proto))(*it_func))(args); \ | 51 | ((void(*)(proto))(*it_func))(args); \ |
48 | } while (*(++it_func)); \ | 52 | } while (*(++it_func)); \ |
49 | } \ | 53 | } \ |
50 | rcu_read_unlock_sched(); \ | 54 | rcu_read_unlock_sched_notrace(); \ |
51 | } while (0) | 55 | } while (0) |
52 | 56 | ||
53 | /* | 57 | /* |
@@ -55,35 +59,40 @@ struct tracepoint { | |||
55 | * not add unwanted padding between the beginning of the section and the | 59 | * not add unwanted padding between the beginning of the section and the |
56 | * structure. Force alignment to the same alignment as the section start. | 60 | * structure. Force alignment to the same alignment as the section start. |
57 | */ | 61 | */ |
58 | #define DEFINE_TRACE(name, proto, args) \ | 62 | #define DECLARE_TRACE(name, proto, args) \ |
63 | extern struct tracepoint __tracepoint_##name; \ | ||
59 | static inline void trace_##name(proto) \ | 64 | static inline void trace_##name(proto) \ |
60 | { \ | 65 | { \ |
61 | static const char __tpstrtab_##name[] \ | ||
62 | __attribute__((section("__tracepoints_strings"))) \ | ||
63 | = #name ":" #proto; \ | ||
64 | static struct tracepoint __tracepoint_##name \ | ||
65 | __attribute__((section("__tracepoints"), aligned(8))) = \ | ||
66 | { __tpstrtab_##name, 0, NULL }; \ | ||
67 | if (unlikely(__tracepoint_##name.state)) \ | 66 | if (unlikely(__tracepoint_##name.state)) \ |
68 | __DO_TRACE(&__tracepoint_##name, \ | 67 | __DO_TRACE(&__tracepoint_##name, \ |
69 | TPPROTO(proto), TPARGS(args)); \ | 68 | TPPROTO(proto), TPARGS(args)); \ |
70 | } \ | 69 | } \ |
71 | static inline int register_trace_##name(void (*probe)(proto)) \ | 70 | static inline int register_trace_##name(void (*probe)(proto)) \ |
72 | { \ | 71 | { \ |
73 | return tracepoint_probe_register(#name ":" #proto, \ | 72 | return tracepoint_probe_register(#name, (void *)probe); \ |
74 | (void *)probe); \ | ||
75 | } \ | 73 | } \ |
76 | static inline void unregister_trace_##name(void (*probe)(proto))\ | 74 | static inline int unregister_trace_##name(void (*probe)(proto)) \ |
77 | { \ | 75 | { \ |
78 | tracepoint_probe_unregister(#name ":" #proto, \ | 76 | return tracepoint_probe_unregister(#name, (void *)probe);\ |
79 | (void *)probe); \ | ||
80 | } | 77 | } |
81 | 78 | ||
79 | #define DEFINE_TRACE(name) \ | ||
80 | static const char __tpstrtab_##name[] \ | ||
81 | __attribute__((section("__tracepoints_strings"))) = #name; \ | ||
82 | struct tracepoint __tracepoint_##name \ | ||
83 | __attribute__((section("__tracepoints"), aligned(32))) = \ | ||
84 | { __tpstrtab_##name, 0, NULL } | ||
85 | |||
86 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ | ||
87 | EXPORT_SYMBOL_GPL(__tracepoint_##name) | ||
88 | #define EXPORT_TRACEPOINT_SYMBOL(name) \ | ||
89 | EXPORT_SYMBOL(__tracepoint_##name) | ||
90 | |||
82 | extern void tracepoint_update_probe_range(struct tracepoint *begin, | 91 | extern void tracepoint_update_probe_range(struct tracepoint *begin, |
83 | struct tracepoint *end); | 92 | struct tracepoint *end); |
84 | 93 | ||
85 | #else /* !CONFIG_TRACEPOINTS */ | 94 | #else /* !CONFIG_TRACEPOINTS */ |
86 | #define DEFINE_TRACE(name, proto, args) \ | 95 | #define DECLARE_TRACE(name, proto, args) \ |
87 | static inline void _do_trace_##name(struct tracepoint *tp, proto) \ | 96 | static inline void _do_trace_##name(struct tracepoint *tp, proto) \ |
88 | { } \ | 97 | { } \ |
89 | static inline void trace_##name(proto) \ | 98 | static inline void trace_##name(proto) \ |
@@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin, | |||
92 | { \ | 101 | { \ |
93 | return -ENOSYS; \ | 102 | return -ENOSYS; \ |
94 | } \ | 103 | } \ |
95 | static inline void unregister_trace_##name(void (*probe)(proto))\ | 104 | static inline int unregister_trace_##name(void (*probe)(proto)) \ |
96 | { } | 105 | { \ |
106 | return -ENOSYS; \ | ||
107 | } | ||
108 | |||
109 | #define DEFINE_TRACE(name) | ||
110 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) | ||
111 | #define EXPORT_TRACEPOINT_SYMBOL(name) | ||
97 | 112 | ||
98 | static inline void tracepoint_update_probe_range(struct tracepoint *begin, | 113 | static inline void tracepoint_update_probe_range(struct tracepoint *begin, |
99 | struct tracepoint *end) | 114 | struct tracepoint *end) |
diff --git a/include/trace/sched.h b/include/trace/sched.h index ad47369d01b5..9b2854abf7e2 100644 --- a/include/trace/sched.h +++ b/include/trace/sched.h | |||
@@ -4,52 +4,52 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/tracepoint.h> | 5 | #include <linux/tracepoint.h> |
6 | 6 | ||
7 | DEFINE_TRACE(sched_kthread_stop, | 7 | DECLARE_TRACE(sched_kthread_stop, |
8 | TPPROTO(struct task_struct *t), | 8 | TPPROTO(struct task_struct *t), |
9 | TPARGS(t)); | 9 | TPARGS(t)); |
10 | 10 | ||
11 | DEFINE_TRACE(sched_kthread_stop_ret, | 11 | DECLARE_TRACE(sched_kthread_stop_ret, |
12 | TPPROTO(int ret), | 12 | TPPROTO(int ret), |
13 | TPARGS(ret)); | 13 | TPARGS(ret)); |
14 | 14 | ||
15 | DEFINE_TRACE(sched_wait_task, | 15 | DECLARE_TRACE(sched_wait_task, |
16 | TPPROTO(struct rq *rq, struct task_struct *p), | 16 | TPPROTO(struct rq *rq, struct task_struct *p), |
17 | TPARGS(rq, p)); | 17 | TPARGS(rq, p)); |
18 | 18 | ||
19 | DEFINE_TRACE(sched_wakeup, | 19 | DECLARE_TRACE(sched_wakeup, |
20 | TPPROTO(struct rq *rq, struct task_struct *p), | 20 | TPPROTO(struct rq *rq, struct task_struct *p), |
21 | TPARGS(rq, p)); | 21 | TPARGS(rq, p)); |
22 | 22 | ||
23 | DEFINE_TRACE(sched_wakeup_new, | 23 | DECLARE_TRACE(sched_wakeup_new, |
24 | TPPROTO(struct rq *rq, struct task_struct *p), | 24 | TPPROTO(struct rq *rq, struct task_struct *p), |
25 | TPARGS(rq, p)); | 25 | TPARGS(rq, p)); |
26 | 26 | ||
27 | DEFINE_TRACE(sched_switch, | 27 | DECLARE_TRACE(sched_switch, |
28 | TPPROTO(struct rq *rq, struct task_struct *prev, | 28 | TPPROTO(struct rq *rq, struct task_struct *prev, |
29 | struct task_struct *next), | 29 | struct task_struct *next), |
30 | TPARGS(rq, prev, next)); | 30 | TPARGS(rq, prev, next)); |
31 | 31 | ||
32 | DEFINE_TRACE(sched_migrate_task, | 32 | DECLARE_TRACE(sched_migrate_task, |
33 | TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu), | 33 | TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu), |
34 | TPARGS(rq, p, dest_cpu)); | 34 | TPARGS(rq, p, dest_cpu)); |
35 | 35 | ||
36 | DEFINE_TRACE(sched_process_free, | 36 | DECLARE_TRACE(sched_process_free, |
37 | TPPROTO(struct task_struct *p), | 37 | TPPROTO(struct task_struct *p), |
38 | TPARGS(p)); | 38 | TPARGS(p)); |
39 | 39 | ||
40 | DEFINE_TRACE(sched_process_exit, | 40 | DECLARE_TRACE(sched_process_exit, |
41 | TPPROTO(struct task_struct *p), | 41 | TPPROTO(struct task_struct *p), |
42 | TPARGS(p)); | 42 | TPARGS(p)); |
43 | 43 | ||
44 | DEFINE_TRACE(sched_process_wait, | 44 | DECLARE_TRACE(sched_process_wait, |
45 | TPPROTO(struct pid *pid), | 45 | TPPROTO(struct pid *pid), |
46 | TPARGS(pid)); | 46 | TPARGS(pid)); |
47 | 47 | ||
48 | DEFINE_TRACE(sched_process_fork, | 48 | DECLARE_TRACE(sched_process_fork, |
49 | TPPROTO(struct task_struct *parent, struct task_struct *child), | 49 | TPPROTO(struct task_struct *parent, struct task_struct *child), |
50 | TPARGS(parent, child)); | 50 | TPARGS(parent, child)); |
51 | 51 | ||
52 | DEFINE_TRACE(sched_signal_send, | 52 | DECLARE_TRACE(sched_signal_send, |
53 | TPPROTO(int sig, struct task_struct *p), | 53 | TPPROTO(int sig, struct task_struct *p), |
54 | TPARGS(sig, p)); | 54 | TPARGS(sig, p)); |
55 | 55 | ||
diff --git a/init/Kconfig b/init/Kconfig index 86b00c53fade..f5bacb438711 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -808,6 +808,7 @@ config TRACEPOINTS | |||
808 | 808 | ||
809 | config MARKERS | 809 | config MARKERS |
810 | bool "Activate markers" | 810 | bool "Activate markers" |
811 | depends on TRACEPOINTS | ||
811 | help | 812 | help |
812 | Place an empty function call at each marker site. Can be | 813 | Place an empty function call at each marker site. Can be |
813 | dynamically changed for a probe function. | 814 | dynamically changed for a probe function. |
diff --git a/kernel/exit.c b/kernel/exit.c index ae2b92be5fae..f995d2418668 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -54,6 +54,10 @@ | |||
54 | #include <asm/pgtable.h> | 54 | #include <asm/pgtable.h> |
55 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
56 | 56 | ||
57 | DEFINE_TRACE(sched_process_free); | ||
58 | DEFINE_TRACE(sched_process_exit); | ||
59 | DEFINE_TRACE(sched_process_wait); | ||
60 | |||
57 | static void exit_mm(struct task_struct * tsk); | 61 | static void exit_mm(struct task_struct * tsk); |
58 | 62 | ||
59 | static inline int task_detached(struct task_struct *p) | 63 | static inline int task_detached(struct task_struct *p) |
diff --git a/kernel/fork.c b/kernel/fork.c index f6083561dfe0..0837d0deee5f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -79,6 +79,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0; | |||
79 | 79 | ||
80 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ | 80 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ |
81 | 81 | ||
82 | DEFINE_TRACE(sched_process_fork); | ||
83 | |||
82 | int nr_processes(void) | 84 | int nr_processes(void) |
83 | { | 85 | { |
84 | int cpu; | 86 | int cpu; |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 8e7a7ce3ed0a..4fbc456f393d 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -21,6 +21,9 @@ static DEFINE_SPINLOCK(kthread_create_lock); | |||
21 | static LIST_HEAD(kthread_create_list); | 21 | static LIST_HEAD(kthread_create_list); |
22 | struct task_struct *kthreadd_task; | 22 | struct task_struct *kthreadd_task; |
23 | 23 | ||
24 | DEFINE_TRACE(sched_kthread_stop); | ||
25 | DEFINE_TRACE(sched_kthread_stop_ret); | ||
26 | |||
24 | struct kthread_create_info | 27 | struct kthread_create_info |
25 | { | 28 | { |
26 | /* Information passed to kthread() from kthreadd. */ | 29 | /* Information passed to kthread() from kthreadd. */ |
diff --git a/kernel/marker.c b/kernel/marker.c index 2898b647d415..ea54f2647868 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
@@ -81,7 +81,7 @@ struct marker_entry { | |||
81 | * though the function pointer change and the marker enabling are two distinct | 81 | * though the function pointer change and the marker enabling are two distinct |
82 | * operations that modifies the execution flow of preemptible code. | 82 | * operations that modifies the execution flow of preemptible code. |
83 | */ | 83 | */ |
84 | void __mark_empty_function(void *probe_private, void *call_private, | 84 | notrace void __mark_empty_function(void *probe_private, void *call_private, |
85 | const char *fmt, va_list *args) | 85 | const char *fmt, va_list *args) |
86 | { | 86 | { |
87 | } | 87 | } |
@@ -97,7 +97,8 @@ EXPORT_SYMBOL_GPL(__mark_empty_function); | |||
97 | * need to put a full smp_rmb() in this branch. This is why we do not use | 97 | * need to put a full smp_rmb() in this branch. This is why we do not use |
98 | * rcu_dereference() for the pointer read. | 98 | * rcu_dereference() for the pointer read. |
99 | */ | 99 | */ |
100 | void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | 100 | notrace void marker_probe_cb(const struct marker *mdata, |
101 | void *call_private, ...) | ||
101 | { | 102 | { |
102 | va_list args; | 103 | va_list args; |
103 | char ptype; | 104 | char ptype; |
@@ -107,7 +108,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
107 | * sure the teardown of the callbacks can be done correctly when they | 108 | * sure the teardown of the callbacks can be done correctly when they |
108 | * are in modules and they insure RCU read coherency. | 109 | * are in modules and they insure RCU read coherency. |
109 | */ | 110 | */ |
110 | rcu_read_lock_sched(); | 111 | rcu_read_lock_sched_notrace(); |
111 | ptype = mdata->ptype; | 112 | ptype = mdata->ptype; |
112 | if (likely(!ptype)) { | 113 | if (likely(!ptype)) { |
113 | marker_probe_func *func; | 114 | marker_probe_func *func; |
@@ -145,7 +146,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
145 | va_end(args); | 146 | va_end(args); |
146 | } | 147 | } |
147 | } | 148 | } |
148 | rcu_read_unlock_sched(); | 149 | rcu_read_unlock_sched_notrace(); |
149 | } | 150 | } |
150 | EXPORT_SYMBOL_GPL(marker_probe_cb); | 151 | EXPORT_SYMBOL_GPL(marker_probe_cb); |
151 | 152 | ||
@@ -157,12 +158,13 @@ EXPORT_SYMBOL_GPL(marker_probe_cb); | |||
157 | * | 158 | * |
158 | * Should be connected to markers "MARK_NOARGS". | 159 | * Should be connected to markers "MARK_NOARGS". |
159 | */ | 160 | */ |
160 | static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...) | 161 | static notrace void marker_probe_cb_noarg(const struct marker *mdata, |
162 | void *call_private, ...) | ||
161 | { | 163 | { |
162 | va_list args; /* not initialized */ | 164 | va_list args; /* not initialized */ |
163 | char ptype; | 165 | char ptype; |
164 | 166 | ||
165 | rcu_read_lock_sched(); | 167 | rcu_read_lock_sched_notrace(); |
166 | ptype = mdata->ptype; | 168 | ptype = mdata->ptype; |
167 | if (likely(!ptype)) { | 169 | if (likely(!ptype)) { |
168 | marker_probe_func *func; | 170 | marker_probe_func *func; |
@@ -195,7 +197,7 @@ static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private | |||
195 | multi[i].func(multi[i].probe_private, call_private, | 197 | multi[i].func(multi[i].probe_private, call_private, |
196 | mdata->format, &args); | 198 | mdata->format, &args); |
197 | } | 199 | } |
198 | rcu_read_unlock_sched(); | 200 | rcu_read_unlock_sched_notrace(); |
199 | } | 201 | } |
200 | 202 | ||
201 | static void free_old_closure(struct rcu_head *head) | 203 | static void free_old_closure(struct rcu_head *head) |
@@ -477,7 +479,7 @@ static int marker_set_format(struct marker_entry *entry, const char *format) | |||
477 | static int set_marker(struct marker_entry *entry, struct marker *elem, | 479 | static int set_marker(struct marker_entry *entry, struct marker *elem, |
478 | int active) | 480 | int active) |
479 | { | 481 | { |
480 | int ret; | 482 | int ret = 0; |
481 | WARN_ON(strcmp(entry->name, elem->name) != 0); | 483 | WARN_ON(strcmp(entry->name, elem->name) != 0); |
482 | 484 | ||
483 | if (entry->format) { | 485 | if (entry->format) { |
@@ -529,9 +531,40 @@ static int set_marker(struct marker_entry *entry, struct marker *elem, | |||
529 | */ | 531 | */ |
530 | smp_wmb(); | 532 | smp_wmb(); |
531 | elem->ptype = entry->ptype; | 533 | elem->ptype = entry->ptype; |
534 | |||
535 | if (elem->tp_name && (active ^ elem->state)) { | ||
536 | WARN_ON(!elem->tp_cb); | ||
537 | /* | ||
538 | * It is ok to directly call the probe registration because type | ||
539 | * checking has been done in the __trace_mark_tp() macro. | ||
540 | */ | ||
541 | |||
542 | if (active) { | ||
543 | /* | ||
544 | * try_module_get should always succeed because we hold | ||
545 | * lock_module() to get the tp_cb address. | ||
546 | */ | ||
547 | ret = try_module_get(__module_text_address( | ||
548 | (unsigned long)elem->tp_cb)); | ||
549 | BUG_ON(!ret); | ||
550 | ret = tracepoint_probe_register_noupdate( | ||
551 | elem->tp_name, | ||
552 | elem->tp_cb); | ||
553 | } else { | ||
554 | ret = tracepoint_probe_unregister_noupdate( | ||
555 | elem->tp_name, | ||
556 | elem->tp_cb); | ||
557 | /* | ||
558 | * tracepoint_probe_update_all() must be called | ||
559 | * before the module containing tp_cb is unloaded. | ||
560 | */ | ||
561 | module_put(__module_text_address( | ||
562 | (unsigned long)elem->tp_cb)); | ||
563 | } | ||
564 | } | ||
532 | elem->state = active; | 565 | elem->state = active; |
533 | 566 | ||
534 | return 0; | 567 | return ret; |
535 | } | 568 | } |
536 | 569 | ||
537 | /* | 570 | /* |
@@ -542,7 +575,24 @@ static int set_marker(struct marker_entry *entry, struct marker *elem, | |||
542 | */ | 575 | */ |
543 | static void disable_marker(struct marker *elem) | 576 | static void disable_marker(struct marker *elem) |
544 | { | 577 | { |
578 | int ret; | ||
579 | |||
545 | /* leave "call" as is. It is known statically. */ | 580 | /* leave "call" as is. It is known statically. */ |
581 | if (elem->tp_name && elem->state) { | ||
582 | WARN_ON(!elem->tp_cb); | ||
583 | /* | ||
584 | * It is ok to directly call the probe registration because type | ||
585 | * checking has been done in the __trace_mark_tp() macro. | ||
586 | */ | ||
587 | ret = tracepoint_probe_unregister_noupdate(elem->tp_name, | ||
588 | elem->tp_cb); | ||
589 | WARN_ON(ret); | ||
590 | /* | ||
591 | * tracepoint_probe_update_all() must be called | ||
592 | * before the module containing tp_cb is unloaded. | ||
593 | */ | ||
594 | module_put(__module_text_address((unsigned long)elem->tp_cb)); | ||
595 | } | ||
546 | elem->state = 0; | 596 | elem->state = 0; |
547 | elem->single.func = __mark_empty_function; | 597 | elem->single.func = __mark_empty_function; |
548 | /* Update the function before setting the ptype */ | 598 | /* Update the function before setting the ptype */ |
@@ -606,6 +656,7 @@ static void marker_update_probes(void) | |||
606 | marker_update_probe_range(__start___markers, __stop___markers); | 656 | marker_update_probe_range(__start___markers, __stop___markers); |
607 | /* Markers in modules. */ | 657 | /* Markers in modules. */ |
608 | module_update_markers(); | 658 | module_update_markers(); |
659 | tracepoint_probe_update_all(); | ||
609 | } | 660 | } |
610 | 661 | ||
611 | /** | 662 | /** |
@@ -653,10 +704,11 @@ int marker_probe_register(const char *name, const char *format, | |||
653 | goto end; | 704 | goto end; |
654 | } | 705 | } |
655 | mutex_unlock(&markers_mutex); | 706 | mutex_unlock(&markers_mutex); |
656 | marker_update_probes(); /* may update entry */ | 707 | marker_update_probes(); |
657 | mutex_lock(&markers_mutex); | 708 | mutex_lock(&markers_mutex); |
658 | entry = get_marker(name); | 709 | entry = get_marker(name); |
659 | WARN_ON(!entry); | 710 | if (!entry) |
711 | goto end; | ||
660 | if (entry->rcu_pending) | 712 | if (entry->rcu_pending) |
661 | rcu_barrier_sched(); | 713 | rcu_barrier_sched(); |
662 | entry->oldptr = old; | 714 | entry->oldptr = old; |
@@ -697,7 +749,7 @@ int marker_probe_unregister(const char *name, | |||
697 | rcu_barrier_sched(); | 749 | rcu_barrier_sched(); |
698 | old = marker_entry_remove_probe(entry, probe, probe_private); | 750 | old = marker_entry_remove_probe(entry, probe, probe_private); |
699 | mutex_unlock(&markers_mutex); | 751 | mutex_unlock(&markers_mutex); |
700 | marker_update_probes(); /* may update entry */ | 752 | marker_update_probes(); |
701 | mutex_lock(&markers_mutex); | 753 | mutex_lock(&markers_mutex); |
702 | entry = get_marker(name); | 754 | entry = get_marker(name); |
703 | if (!entry) | 755 | if (!entry) |
@@ -778,10 +830,11 @@ int marker_probe_unregister_private_data(marker_probe_func *probe, | |||
778 | rcu_barrier_sched(); | 830 | rcu_barrier_sched(); |
779 | old = marker_entry_remove_probe(entry, NULL, probe_private); | 831 | old = marker_entry_remove_probe(entry, NULL, probe_private); |
780 | mutex_unlock(&markers_mutex); | 832 | mutex_unlock(&markers_mutex); |
781 | marker_update_probes(); /* may update entry */ | 833 | marker_update_probes(); |
782 | mutex_lock(&markers_mutex); | 834 | mutex_lock(&markers_mutex); |
783 | entry = get_marker_from_private_data(probe, probe_private); | 835 | entry = get_marker_from_private_data(probe, probe_private); |
784 | WARN_ON(!entry); | 836 | if (!entry) |
837 | goto end; | ||
785 | if (entry->rcu_pending) | 838 | if (entry->rcu_pending) |
786 | rcu_barrier_sched(); | 839 | rcu_barrier_sched(); |
787 | entry->oldptr = old; | 840 | entry->oldptr = old; |
@@ -842,3 +895,36 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe, | |||
842 | return ERR_PTR(-ENOENT); | 895 | return ERR_PTR(-ENOENT); |
843 | } | 896 | } |
844 | EXPORT_SYMBOL_GPL(marker_get_private_data); | 897 | EXPORT_SYMBOL_GPL(marker_get_private_data); |
898 | |||
899 | #ifdef CONFIG_MODULES | ||
900 | |||
901 | int marker_module_notify(struct notifier_block *self, | ||
902 | unsigned long val, void *data) | ||
903 | { | ||
904 | struct module *mod = data; | ||
905 | |||
906 | switch (val) { | ||
907 | case MODULE_STATE_COMING: | ||
908 | marker_update_probe_range(mod->markers, | ||
909 | mod->markers + mod->num_markers); | ||
910 | break; | ||
911 | case MODULE_STATE_GOING: | ||
912 | marker_update_probe_range(mod->markers, | ||
913 | mod->markers + mod->num_markers); | ||
914 | break; | ||
915 | } | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | struct notifier_block marker_module_nb = { | ||
920 | .notifier_call = marker_module_notify, | ||
921 | .priority = 0, | ||
922 | }; | ||
923 | |||
924 | static int init_markers(void) | ||
925 | { | ||
926 | return register_module_notifier(&marker_module_nb); | ||
927 | } | ||
928 | __initcall(init_markers); | ||
929 | |||
930 | #endif /* CONFIG_MODULES */ | ||
diff --git a/kernel/module.c b/kernel/module.c index 1f4cc00e0c20..89bcf7c1327d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2184,24 +2184,15 @@ static noinline struct module *load_module(void __user *umod, | |||
2184 | struct mod_debug *debug; | 2184 | struct mod_debug *debug; |
2185 | unsigned int num_debug; | 2185 | unsigned int num_debug; |
2186 | 2186 | ||
2187 | #ifdef CONFIG_MARKERS | ||
2188 | marker_update_probe_range(mod->markers, | ||
2189 | mod->markers + mod->num_markers); | ||
2190 | #endif | ||
2191 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", | 2187 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", |
2192 | sizeof(*debug), &num_debug); | 2188 | sizeof(*debug), &num_debug); |
2193 | dynamic_printk_setup(debug, num_debug); | 2189 | dynamic_printk_setup(debug, num_debug); |
2194 | |||
2195 | #ifdef CONFIG_TRACEPOINTS | ||
2196 | tracepoint_update_probe_range(mod->tracepoints, | ||
2197 | mod->tracepoints + mod->num_tracepoints); | ||
2198 | #endif | ||
2199 | } | 2190 | } |
2200 | 2191 | ||
2201 | /* sechdrs[0].sh_size is always zero */ | 2192 | /* sechdrs[0].sh_size is always zero */ |
2202 | mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc", | 2193 | mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc", |
2203 | sizeof(*mseg), &num_mcount); | 2194 | sizeof(*mseg), &num_mcount); |
2204 | ftrace_init_module(mseg, mseg + num_mcount); | 2195 | ftrace_init_module(mod, mseg, mseg + num_mcount); |
2205 | 2196 | ||
2206 | err = module_finalize(hdr, sechdrs, mod); | 2197 | err = module_finalize(hdr, sechdrs, mod); |
2207 | if (err < 0) | 2198 | if (err < 0) |
diff --git a/kernel/sched.c b/kernel/sched.c index 50a21f964679..327f91c63c99 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -118,6 +118,12 @@ | |||
118 | */ | 118 | */ |
119 | #define RUNTIME_INF ((u64)~0ULL) | 119 | #define RUNTIME_INF ((u64)~0ULL) |
120 | 120 | ||
121 | DEFINE_TRACE(sched_wait_task); | ||
122 | DEFINE_TRACE(sched_wakeup); | ||
123 | DEFINE_TRACE(sched_wakeup_new); | ||
124 | DEFINE_TRACE(sched_switch); | ||
125 | DEFINE_TRACE(sched_migrate_task); | ||
126 | |||
121 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
122 | /* | 128 | /* |
123 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 129 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
diff --git a/kernel/signal.c b/kernel/signal.c index 4530fc654455..e9afe63da24b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -41,6 +41,8 @@ | |||
41 | 41 | ||
42 | static struct kmem_cache *sigqueue_cachep; | 42 | static struct kmem_cache *sigqueue_cachep; |
43 | 43 | ||
44 | DEFINE_TRACE(sched_signal_send); | ||
45 | |||
44 | static void __user *sig_handler(struct task_struct *t, int sig) | 46 | static void __user *sig_handler(struct task_struct *t, int sig) |
45 | { | 47 | { |
46 | return t->sighand->action[sig - 1].sa.sa_handler; | 48 | return t->sighand->action[sig - 1].sa.sa_handler; |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 9c89526b6b7c..b8378fad29a3 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -59,7 +59,6 @@ config FUNCTION_TRACER | |||
59 | 59 | ||
60 | config FUNCTION_RET_TRACER | 60 | config FUNCTION_RET_TRACER |
61 | bool "Kernel Function return Tracer" | 61 | bool "Kernel Function return Tracer" |
62 | depends on !DYNAMIC_FTRACE | ||
63 | depends on HAVE_FUNCTION_RET_TRACER | 62 | depends on HAVE_FUNCTION_RET_TRACER |
64 | depends on FUNCTION_TRACER | 63 | depends on FUNCTION_TRACER |
65 | help | 64 | help |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 54cb9a7d15e5..f212da486689 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -50,6 +50,9 @@ static int last_ftrace_enabled; | |||
50 | /* Quick disabling of function tracer. */ | 50 | /* Quick disabling of function tracer. */ |
51 | int function_trace_stop; | 51 | int function_trace_stop; |
52 | 52 | ||
53 | /* By default, current tracing type is normal tracing. */ | ||
54 | enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
55 | |||
53 | /* | 56 | /* |
54 | * ftrace_disabled is set when an anomaly is discovered. | 57 | * ftrace_disabled is set when an anomaly is discovered. |
55 | * ftrace_disabled is much stronger than ftrace_enabled. | 58 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -334,7 +337,7 @@ ftrace_record_ip(unsigned long ip) | |||
334 | { | 337 | { |
335 | struct dyn_ftrace *rec; | 338 | struct dyn_ftrace *rec; |
336 | 339 | ||
337 | if (!ftrace_enabled || ftrace_disabled) | 340 | if (ftrace_disabled) |
338 | return NULL; | 341 | return NULL; |
339 | 342 | ||
340 | rec = ftrace_alloc_dyn_node(ip); | 343 | rec = ftrace_alloc_dyn_node(ip); |
@@ -348,107 +351,138 @@ ftrace_record_ip(unsigned long ip) | |||
348 | return rec; | 351 | return rec; |
349 | } | 352 | } |
350 | 353 | ||
351 | #define FTRACE_ADDR ((long)(ftrace_caller)) | 354 | static void print_ip_ins(const char *fmt, unsigned char *p) |
355 | { | ||
356 | int i; | ||
357 | |||
358 | printk(KERN_CONT "%s", fmt); | ||
359 | |||
360 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
361 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
362 | } | ||
363 | |||
364 | static void ftrace_bug(int failed, unsigned long ip) | ||
365 | { | ||
366 | switch (failed) { | ||
367 | case -EFAULT: | ||
368 | FTRACE_WARN_ON_ONCE(1); | ||
369 | pr_info("ftrace faulted on modifying "); | ||
370 | print_ip_sym(ip); | ||
371 | break; | ||
372 | case -EINVAL: | ||
373 | FTRACE_WARN_ON_ONCE(1); | ||
374 | pr_info("ftrace failed to modify "); | ||
375 | print_ip_sym(ip); | ||
376 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
377 | printk(KERN_CONT "\n"); | ||
378 | break; | ||
379 | case -EPERM: | ||
380 | FTRACE_WARN_ON_ONCE(1); | ||
381 | pr_info("ftrace faulted on writing "); | ||
382 | print_ip_sym(ip); | ||
383 | break; | ||
384 | default: | ||
385 | FTRACE_WARN_ON_ONCE(1); | ||
386 | pr_info("ftrace faulted on unknown error "); | ||
387 | print_ip_sym(ip); | ||
388 | } | ||
389 | } | ||
390 | |||
352 | 391 | ||
353 | static int | 392 | static int |
354 | __ftrace_replace_code(struct dyn_ftrace *rec, | 393 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
355 | unsigned char *old, unsigned char *new, int enable) | ||
356 | { | 394 | { |
357 | unsigned long ip, fl; | 395 | unsigned long ip, fl; |
396 | unsigned long ftrace_addr; | ||
397 | |||
398 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
399 | if (ftrace_tracing_type == FTRACE_TYPE_ENTER) | ||
400 | ftrace_addr = (unsigned long)ftrace_caller; | ||
401 | else | ||
402 | ftrace_addr = (unsigned long)ftrace_return_caller; | ||
403 | #else | ||
404 | ftrace_addr = (unsigned long)ftrace_caller; | ||
405 | #endif | ||
358 | 406 | ||
359 | ip = rec->ip; | 407 | ip = rec->ip; |
360 | 408 | ||
361 | if (ftrace_filtered && enable) { | 409 | /* |
410 | * If this record is not to be traced and | ||
411 | * it is not enabled then do nothing. | ||
412 | * | ||
413 | * If this record is not to be traced and | ||
414 | * it is enabled then disabled it. | ||
415 | * | ||
416 | */ | ||
417 | if (rec->flags & FTRACE_FL_NOTRACE) { | ||
418 | if (rec->flags & FTRACE_FL_ENABLED) | ||
419 | rec->flags &= ~FTRACE_FL_ENABLED; | ||
420 | else | ||
421 | return 0; | ||
422 | |||
423 | } else if (ftrace_filtered && enable) { | ||
362 | /* | 424 | /* |
363 | * If filtering is on: | 425 | * Filtering is on: |
364 | * | ||
365 | * If this record is set to be filtered and | ||
366 | * is enabled then do nothing. | ||
367 | * | ||
368 | * If this record is set to be filtered and | ||
369 | * it is not enabled, enable it. | ||
370 | * | ||
371 | * If this record is not set to be filtered | ||
372 | * and it is not enabled do nothing. | ||
373 | * | ||
374 | * If this record is set not to trace then | ||
375 | * do nothing. | ||
376 | * | ||
377 | * If this record is set not to trace and | ||
378 | * it is enabled then disable it. | ||
379 | * | ||
380 | * If this record is not set to be filtered and | ||
381 | * it is enabled, disable it. | ||
382 | */ | 426 | */ |
383 | 427 | ||
384 | fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE | | 428 | fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED); |
385 | FTRACE_FL_ENABLED); | ||
386 | 429 | ||
387 | if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) || | 430 | /* Record is filtered and enabled, do nothing */ |
388 | (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) || | 431 | if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) |
389 | !fl || (fl == FTRACE_FL_NOTRACE)) | ||
390 | return 0; | 432 | return 0; |
391 | 433 | ||
392 | /* | 434 | /* Record is not filtered and is not enabled do nothing */ |
393 | * If it is enabled disable it, | 435 | if (!fl) |
394 | * otherwise enable it! | 436 | return 0; |
395 | */ | 437 | |
396 | if (fl & FTRACE_FL_ENABLED) { | 438 | /* Record is not filtered but enabled, disable it */ |
397 | /* swap new and old */ | 439 | if (fl == FTRACE_FL_ENABLED) |
398 | new = old; | ||
399 | old = ftrace_call_replace(ip, FTRACE_ADDR); | ||
400 | rec->flags &= ~FTRACE_FL_ENABLED; | 440 | rec->flags &= ~FTRACE_FL_ENABLED; |
401 | } else { | 441 | else |
402 | new = ftrace_call_replace(ip, FTRACE_ADDR); | 442 | /* Otherwise record is filtered but not enabled, enable it */ |
403 | rec->flags |= FTRACE_FL_ENABLED; | 443 | rec->flags |= FTRACE_FL_ENABLED; |
404 | } | ||
405 | } else { | 444 | } else { |
445 | /* Disable or not filtered */ | ||
406 | 446 | ||
407 | if (enable) { | 447 | if (enable) { |
408 | /* | 448 | /* if record is enabled, do nothing */ |
409 | * If this record is set not to trace and is | ||
410 | * not enabled, do nothing. | ||
411 | */ | ||
412 | fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED); | ||
413 | if (fl == FTRACE_FL_NOTRACE) | ||
414 | return 0; | ||
415 | |||
416 | new = ftrace_call_replace(ip, FTRACE_ADDR); | ||
417 | } else | ||
418 | old = ftrace_call_replace(ip, FTRACE_ADDR); | ||
419 | |||
420 | if (enable) { | ||
421 | if (rec->flags & FTRACE_FL_ENABLED) | 449 | if (rec->flags & FTRACE_FL_ENABLED) |
422 | return 0; | 450 | return 0; |
451 | |||
423 | rec->flags |= FTRACE_FL_ENABLED; | 452 | rec->flags |= FTRACE_FL_ENABLED; |
453 | |||
424 | } else { | 454 | } else { |
455 | |||
456 | /* if record is not enabled do nothing */ | ||
425 | if (!(rec->flags & FTRACE_FL_ENABLED)) | 457 | if (!(rec->flags & FTRACE_FL_ENABLED)) |
426 | return 0; | 458 | return 0; |
459 | |||
427 | rec->flags &= ~FTRACE_FL_ENABLED; | 460 | rec->flags &= ~FTRACE_FL_ENABLED; |
428 | } | 461 | } |
429 | } | 462 | } |
430 | 463 | ||
431 | return ftrace_modify_code(ip, old, new); | 464 | if (rec->flags & FTRACE_FL_ENABLED) |
465 | return ftrace_make_call(rec, ftrace_addr); | ||
466 | else | ||
467 | return ftrace_make_nop(NULL, rec, ftrace_addr); | ||
432 | } | 468 | } |
433 | 469 | ||
434 | static void ftrace_replace_code(int enable) | 470 | static void ftrace_replace_code(int enable) |
435 | { | 471 | { |
436 | int i, failed; | 472 | int i, failed; |
437 | unsigned char *new = NULL, *old = NULL; | ||
438 | struct dyn_ftrace *rec; | 473 | struct dyn_ftrace *rec; |
439 | struct ftrace_page *pg; | 474 | struct ftrace_page *pg; |
440 | 475 | ||
441 | if (enable) | ||
442 | old = ftrace_nop_replace(); | ||
443 | else | ||
444 | new = ftrace_nop_replace(); | ||
445 | |||
446 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 476 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
447 | for (i = 0; i < pg->index; i++) { | 477 | for (i = 0; i < pg->index; i++) { |
448 | rec = &pg->records[i]; | 478 | rec = &pg->records[i]; |
449 | 479 | ||
450 | /* don't modify code that has already faulted */ | 480 | /* |
451 | if (rec->flags & FTRACE_FL_FAILED) | 481 | * Skip over free records and records that have |
482 | * failed. | ||
483 | */ | ||
484 | if (rec->flags & FTRACE_FL_FREE || | ||
485 | rec->flags & FTRACE_FL_FAILED) | ||
452 | continue; | 486 | continue; |
453 | 487 | ||
454 | /* ignore updates to this record's mcount site */ | 488 | /* ignore updates to this record's mcount site */ |
@@ -459,68 +493,30 @@ static void ftrace_replace_code(int enable) | |||
459 | unfreeze_record(rec); | 493 | unfreeze_record(rec); |
460 | } | 494 | } |
461 | 495 | ||
462 | failed = __ftrace_replace_code(rec, old, new, enable); | 496 | failed = __ftrace_replace_code(rec, enable); |
463 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { | 497 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { |
464 | rec->flags |= FTRACE_FL_FAILED; | 498 | rec->flags |= FTRACE_FL_FAILED; |
465 | if ((system_state == SYSTEM_BOOTING) || | 499 | if ((system_state == SYSTEM_BOOTING) || |
466 | !core_kernel_text(rec->ip)) { | 500 | !core_kernel_text(rec->ip)) { |
467 | ftrace_free_rec(rec); | 501 | ftrace_free_rec(rec); |
468 | } | 502 | } else |
503 | ftrace_bug(failed, rec->ip); | ||
469 | } | 504 | } |
470 | } | 505 | } |
471 | } | 506 | } |
472 | } | 507 | } |
473 | 508 | ||
474 | static void print_ip_ins(const char *fmt, unsigned char *p) | ||
475 | { | ||
476 | int i; | ||
477 | |||
478 | printk(KERN_CONT "%s", fmt); | ||
479 | |||
480 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
481 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
482 | } | ||
483 | |||
484 | static int | 509 | static int |
485 | ftrace_code_disable(struct dyn_ftrace *rec) | 510 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
486 | { | 511 | { |
487 | unsigned long ip; | 512 | unsigned long ip; |
488 | unsigned char *nop, *call; | ||
489 | int ret; | 513 | int ret; |
490 | 514 | ||
491 | ip = rec->ip; | 515 | ip = rec->ip; |
492 | 516 | ||
493 | nop = ftrace_nop_replace(); | 517 | ret = ftrace_make_nop(mod, rec, mcount_addr); |
494 | call = ftrace_call_replace(ip, mcount_addr); | ||
495 | |||
496 | ret = ftrace_modify_code(ip, call, nop); | ||
497 | if (ret) { | 518 | if (ret) { |
498 | switch (ret) { | 519 | ftrace_bug(ret, ip); |
499 | case -EFAULT: | ||
500 | FTRACE_WARN_ON_ONCE(1); | ||
501 | pr_info("ftrace faulted on modifying "); | ||
502 | print_ip_sym(ip); | ||
503 | break; | ||
504 | case -EINVAL: | ||
505 | FTRACE_WARN_ON_ONCE(1); | ||
506 | pr_info("ftrace failed to modify "); | ||
507 | print_ip_sym(ip); | ||
508 | print_ip_ins(" expected: ", call); | ||
509 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
510 | print_ip_ins(" replace: ", nop); | ||
511 | printk(KERN_CONT "\n"); | ||
512 | break; | ||
513 | case -EPERM: | ||
514 | FTRACE_WARN_ON_ONCE(1); | ||
515 | pr_info("ftrace faulted on writing "); | ||
516 | print_ip_sym(ip); | ||
517 | break; | ||
518 | default: | ||
519 | FTRACE_WARN_ON_ONCE(1); | ||
520 | pr_info("ftrace faulted on unknown error "); | ||
521 | print_ip_sym(ip); | ||
522 | } | ||
523 | |||
524 | rec->flags |= FTRACE_FL_FAILED; | 520 | rec->flags |= FTRACE_FL_FAILED; |
525 | return 0; | 521 | return 0; |
526 | } | 522 | } |
@@ -560,8 +556,7 @@ static void ftrace_startup(void) | |||
560 | 556 | ||
561 | mutex_lock(&ftrace_start_lock); | 557 | mutex_lock(&ftrace_start_lock); |
562 | ftrace_start_up++; | 558 | ftrace_start_up++; |
563 | if (ftrace_start_up == 1) | 559 | command |= FTRACE_ENABLE_CALLS; |
564 | command |= FTRACE_ENABLE_CALLS; | ||
565 | 560 | ||
566 | if (saved_ftrace_func != ftrace_trace_function) { | 561 | if (saved_ftrace_func != ftrace_trace_function) { |
567 | saved_ftrace_func = ftrace_trace_function; | 562 | saved_ftrace_func = ftrace_trace_function; |
@@ -639,7 +634,7 @@ static cycle_t ftrace_update_time; | |||
639 | static unsigned long ftrace_update_cnt; | 634 | static unsigned long ftrace_update_cnt; |
640 | unsigned long ftrace_update_tot_cnt; | 635 | unsigned long ftrace_update_tot_cnt; |
641 | 636 | ||
642 | static int ftrace_update_code(void) | 637 | static int ftrace_update_code(struct module *mod) |
643 | { | 638 | { |
644 | struct dyn_ftrace *p, *t; | 639 | struct dyn_ftrace *p, *t; |
645 | cycle_t start, stop; | 640 | cycle_t start, stop; |
@@ -656,7 +651,7 @@ static int ftrace_update_code(void) | |||
656 | list_del_init(&p->list); | 651 | list_del_init(&p->list); |
657 | 652 | ||
658 | /* convert record (i.e, patch mcount-call with NOP) */ | 653 | /* convert record (i.e, patch mcount-call with NOP) */ |
659 | if (ftrace_code_disable(p)) { | 654 | if (ftrace_code_disable(mod, p)) { |
660 | p->flags |= FTRACE_FL_CONVERTED; | 655 | p->flags |= FTRACE_FL_CONVERTED; |
661 | ftrace_update_cnt++; | 656 | ftrace_update_cnt++; |
662 | } else | 657 | } else |
@@ -699,7 +694,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | |||
699 | 694 | ||
700 | cnt = num_to_init / ENTRIES_PER_PAGE; | 695 | cnt = num_to_init / ENTRIES_PER_PAGE; |
701 | pr_info("ftrace: allocating %ld entries in %d pages\n", | 696 | pr_info("ftrace: allocating %ld entries in %d pages\n", |
702 | num_to_init, cnt); | 697 | num_to_init, cnt + 1); |
703 | 698 | ||
704 | for (i = 0; i < cnt; i++) { | 699 | for (i = 0; i < cnt; i++) { |
705 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | 700 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); |
@@ -782,13 +777,11 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
782 | void *p = NULL; | 777 | void *p = NULL; |
783 | loff_t l = -1; | 778 | loff_t l = -1; |
784 | 779 | ||
785 | if (*pos != iter->pos) { | 780 | if (*pos > iter->pos) |
786 | for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l)) | 781 | *pos = iter->pos; |
787 | ; | 782 | |
788 | } else { | 783 | l = *pos; |
789 | l = *pos; | 784 | p = t_next(m, p, &l); |
790 | p = t_next(m, p, &l); | ||
791 | } | ||
792 | 785 | ||
793 | return p; | 786 | return p; |
794 | } | 787 | } |
@@ -799,15 +792,21 @@ static void t_stop(struct seq_file *m, void *p) | |||
799 | 792 | ||
800 | static int t_show(struct seq_file *m, void *v) | 793 | static int t_show(struct seq_file *m, void *v) |
801 | { | 794 | { |
795 | struct ftrace_iterator *iter = m->private; | ||
802 | struct dyn_ftrace *rec = v; | 796 | struct dyn_ftrace *rec = v; |
803 | char str[KSYM_SYMBOL_LEN]; | 797 | char str[KSYM_SYMBOL_LEN]; |
798 | int ret = 0; | ||
804 | 799 | ||
805 | if (!rec) | 800 | if (!rec) |
806 | return 0; | 801 | return 0; |
807 | 802 | ||
808 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 803 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
809 | 804 | ||
810 | seq_printf(m, "%s\n", str); | 805 | ret = seq_printf(m, "%s\n", str); |
806 | if (ret < 0) { | ||
807 | iter->pos--; | ||
808 | iter->idx--; | ||
809 | } | ||
811 | 810 | ||
812 | return 0; | 811 | return 0; |
813 | } | 812 | } |
@@ -833,7 +832,7 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
833 | return -ENOMEM; | 832 | return -ENOMEM; |
834 | 833 | ||
835 | iter->pg = ftrace_pages_start; | 834 | iter->pg = ftrace_pages_start; |
836 | iter->pos = -1; | 835 | iter->pos = 0; |
837 | 836 | ||
838 | ret = seq_open(file, &show_ftrace_seq_ops); | 837 | ret = seq_open(file, &show_ftrace_seq_ops); |
839 | if (!ret) { | 838 | if (!ret) { |
@@ -920,7 +919,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
920 | 919 | ||
921 | if (file->f_mode & FMODE_READ) { | 920 | if (file->f_mode & FMODE_READ) { |
922 | iter->pg = ftrace_pages_start; | 921 | iter->pg = ftrace_pages_start; |
923 | iter->pos = -1; | 922 | iter->pos = 0; |
924 | iter->flags = enable ? FTRACE_ITER_FILTER : | 923 | iter->flags = enable ? FTRACE_ITER_FILTER : |
925 | FTRACE_ITER_NOTRACE; | 924 | FTRACE_ITER_NOTRACE; |
926 | 925 | ||
@@ -1211,7 +1210,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1211 | 1210 | ||
1212 | mutex_lock(&ftrace_sysctl_lock); | 1211 | mutex_lock(&ftrace_sysctl_lock); |
1213 | mutex_lock(&ftrace_start_lock); | 1212 | mutex_lock(&ftrace_start_lock); |
1214 | if (iter->filtered && ftrace_start_up && ftrace_enabled) | 1213 | if (ftrace_start_up && ftrace_enabled) |
1215 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1214 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1216 | mutex_unlock(&ftrace_start_lock); | 1215 | mutex_unlock(&ftrace_start_lock); |
1217 | mutex_unlock(&ftrace_sysctl_lock); | 1216 | mutex_unlock(&ftrace_sysctl_lock); |
@@ -1298,7 +1297,8 @@ static __init int ftrace_init_debugfs(void) | |||
1298 | 1297 | ||
1299 | fs_initcall(ftrace_init_debugfs); | 1298 | fs_initcall(ftrace_init_debugfs); |
1300 | 1299 | ||
1301 | static int ftrace_convert_nops(unsigned long *start, | 1300 | static int ftrace_convert_nops(struct module *mod, |
1301 | unsigned long *start, | ||
1302 | unsigned long *end) | 1302 | unsigned long *end) |
1303 | { | 1303 | { |
1304 | unsigned long *p; | 1304 | unsigned long *p; |
@@ -1309,23 +1309,32 @@ static int ftrace_convert_nops(unsigned long *start, | |||
1309 | p = start; | 1309 | p = start; |
1310 | while (p < end) { | 1310 | while (p < end) { |
1311 | addr = ftrace_call_adjust(*p++); | 1311 | addr = ftrace_call_adjust(*p++); |
1312 | /* | ||
1313 | * Some architecture linkers will pad between | ||
1314 | * the different mcount_loc sections of different | ||
1315 | * object files to satisfy alignments. | ||
1316 | * Skip any NULL pointers. | ||
1317 | */ | ||
1318 | if (!addr) | ||
1319 | continue; | ||
1312 | ftrace_record_ip(addr); | 1320 | ftrace_record_ip(addr); |
1313 | } | 1321 | } |
1314 | 1322 | ||
1315 | /* disable interrupts to prevent kstop machine */ | 1323 | /* disable interrupts to prevent kstop machine */ |
1316 | local_irq_save(flags); | 1324 | local_irq_save(flags); |
1317 | ftrace_update_code(); | 1325 | ftrace_update_code(mod); |
1318 | local_irq_restore(flags); | 1326 | local_irq_restore(flags); |
1319 | mutex_unlock(&ftrace_start_lock); | 1327 | mutex_unlock(&ftrace_start_lock); |
1320 | 1328 | ||
1321 | return 0; | 1329 | return 0; |
1322 | } | 1330 | } |
1323 | 1331 | ||
1324 | void ftrace_init_module(unsigned long *start, unsigned long *end) | 1332 | void ftrace_init_module(struct module *mod, |
1333 | unsigned long *start, unsigned long *end) | ||
1325 | { | 1334 | { |
1326 | if (ftrace_disabled || start == end) | 1335 | if (ftrace_disabled || start == end) |
1327 | return; | 1336 | return; |
1328 | ftrace_convert_nops(start, end); | 1337 | ftrace_convert_nops(mod, start, end); |
1329 | } | 1338 | } |
1330 | 1339 | ||
1331 | extern unsigned long __start_mcount_loc[]; | 1340 | extern unsigned long __start_mcount_loc[]; |
@@ -1355,7 +1364,8 @@ void __init ftrace_init(void) | |||
1355 | 1364 | ||
1356 | last_ftrace_enabled = ftrace_enabled = 1; | 1365 | last_ftrace_enabled = ftrace_enabled = 1; |
1357 | 1366 | ||
1358 | ret = ftrace_convert_nops(__start_mcount_loc, | 1367 | ret = ftrace_convert_nops(NULL, |
1368 | __start_mcount_loc, | ||
1359 | __stop_mcount_loc); | 1369 | __stop_mcount_loc); |
1360 | 1370 | ||
1361 | return; | 1371 | return; |
@@ -1411,10 +1421,17 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1411 | return -1; | 1421 | return -1; |
1412 | 1422 | ||
1413 | mutex_lock(&ftrace_sysctl_lock); | 1423 | mutex_lock(&ftrace_sysctl_lock); |
1424 | |||
1425 | if (ftrace_tracing_type == FTRACE_TYPE_RETURN) { | ||
1426 | ret = -EBUSY; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | |||
1414 | ret = __register_ftrace_function(ops); | 1430 | ret = __register_ftrace_function(ops); |
1415 | ftrace_startup(); | 1431 | ftrace_startup(); |
1416 | mutex_unlock(&ftrace_sysctl_lock); | ||
1417 | 1432 | ||
1433 | out: | ||
1434 | mutex_unlock(&ftrace_sysctl_lock); | ||
1418 | return ret; | 1435 | return ret; |
1419 | } | 1436 | } |
1420 | 1437 | ||
@@ -1480,16 +1497,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1480 | } | 1497 | } |
1481 | 1498 | ||
1482 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1499 | #ifdef CONFIG_FUNCTION_RET_TRACER |
1500 | |||
1501 | /* The callback that hooks the return of a function */ | ||
1483 | trace_function_return_t ftrace_function_return = | 1502 | trace_function_return_t ftrace_function_return = |
1484 | (trace_function_return_t)ftrace_stub; | 1503 | (trace_function_return_t)ftrace_stub; |
1485 | void register_ftrace_return(trace_function_return_t func) | 1504 | |
1505 | int register_ftrace_return(trace_function_return_t func) | ||
1486 | { | 1506 | { |
1507 | int ret = 0; | ||
1508 | |||
1509 | mutex_lock(&ftrace_sysctl_lock); | ||
1510 | |||
1511 | /* | ||
1512 | * Don't launch return tracing if normal function | ||
1513 | * tracing is already running. | ||
1514 | */ | ||
1515 | if (ftrace_trace_function != ftrace_stub) { | ||
1516 | ret = -EBUSY; | ||
1517 | goto out; | ||
1518 | } | ||
1519 | |||
1520 | ftrace_tracing_type = FTRACE_TYPE_RETURN; | ||
1487 | ftrace_function_return = func; | 1521 | ftrace_function_return = func; |
1522 | ftrace_startup(); | ||
1523 | |||
1524 | out: | ||
1525 | mutex_unlock(&ftrace_sysctl_lock); | ||
1526 | return ret; | ||
1488 | } | 1527 | } |
1489 | 1528 | ||
1490 | void unregister_ftrace_return(void) | 1529 | void unregister_ftrace_return(void) |
1491 | { | 1530 | { |
1531 | mutex_lock(&ftrace_sysctl_lock); | ||
1532 | |||
1492 | ftrace_function_return = (trace_function_return_t)ftrace_stub; | 1533 | ftrace_function_return = (trace_function_return_t)ftrace_stub; |
1534 | ftrace_shutdown(); | ||
1535 | /* Restore normal tracing type */ | ||
1536 | ftrace_tracing_type = FTRACE_TYPE_ENTER; | ||
1537 | |||
1538 | mutex_unlock(&ftrace_sysctl_lock); | ||
1493 | } | 1539 | } |
1494 | #endif | 1540 | #endif |
1495 | 1541 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4a904623e05d..396fda034e3f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1051,7 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
1051 | * Need to use raw, since this must be called before the | 1051 | * Need to use raw, since this must be called before the |
1052 | * recursive protection is performed. | 1052 | * recursive protection is performed. |
1053 | */ | 1053 | */ |
1054 | raw_local_irq_save(flags); | 1054 | local_irq_save(flags); |
1055 | cpu = raw_smp_processor_id(); | 1055 | cpu = raw_smp_processor_id(); |
1056 | data = tr->data[cpu]; | 1056 | data = tr->data[cpu]; |
1057 | disabled = atomic_inc_return(&data->disabled); | 1057 | disabled = atomic_inc_return(&data->disabled); |
@@ -1062,7 +1062,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | atomic_dec(&data->disabled); | 1064 | atomic_dec(&data->disabled); |
1065 | raw_local_irq_restore(flags); | 1065 | local_irq_restore(flags); |
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | #ifdef CONFIG_FUNCTION_RET_TRACER | 1068 | #ifdef CONFIG_FUNCTION_RET_TRACER |
@@ -2638,8 +2638,11 @@ static int tracing_set_tracer(char *buf) | |||
2638 | current_trace->reset(tr); | 2638 | current_trace->reset(tr); |
2639 | 2639 | ||
2640 | current_trace = t; | 2640 | current_trace = t; |
2641 | if (t->init) | 2641 | if (t->init) { |
2642 | t->init(tr); | 2642 | ret = t->init(tr); |
2643 | if (ret) | ||
2644 | goto out; | ||
2645 | } | ||
2643 | 2646 | ||
2644 | trace_branch_enable(tr); | 2647 | trace_branch_enable(tr); |
2645 | out: | 2648 | out: |
@@ -2655,6 +2658,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2655 | char buf[max_tracer_type_len+1]; | 2658 | char buf[max_tracer_type_len+1]; |
2656 | int i; | 2659 | int i; |
2657 | size_t ret; | 2660 | size_t ret; |
2661 | int err; | ||
2662 | |||
2663 | ret = cnt; | ||
2658 | 2664 | ||
2659 | if (cnt > max_tracer_type_len) | 2665 | if (cnt > max_tracer_type_len) |
2660 | cnt = max_tracer_type_len; | 2666 | cnt = max_tracer_type_len; |
@@ -2668,12 +2674,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2668 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 2674 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
2669 | buf[i] = 0; | 2675 | buf[i] = 0; |
2670 | 2676 | ||
2671 | ret = tracing_set_tracer(buf); | 2677 | err = tracing_set_tracer(buf); |
2672 | if (!ret) | 2678 | if (err) |
2673 | ret = cnt; | 2679 | return err; |
2674 | 2680 | ||
2675 | if (ret > 0) | 2681 | filp->f_pos += ret; |
2676 | filp->f_pos += ret; | ||
2677 | 2682 | ||
2678 | return ret; | 2683 | return ret; |
2679 | } | 2684 | } |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 790ea8c0e1f3..cdbd5cc22be8 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -264,7 +264,8 @@ enum print_line_t { | |||
264 | */ | 264 | */ |
265 | struct tracer { | 265 | struct tracer { |
266 | const char *name; | 266 | const char *name; |
267 | void (*init)(struct trace_array *tr); | 267 | /* Your tracer should raise a warning if init fails */ |
268 | int (*init)(struct trace_array *tr); | ||
268 | void (*reset)(struct trace_array *tr); | 269 | void (*reset)(struct trace_array *tr); |
269 | void (*start)(struct trace_array *tr); | 270 | void (*start)(struct trace_array *tr); |
270 | void (*stop)(struct trace_array *tr); | 271 | void (*stop)(struct trace_array *tr); |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index cb333b7fd113..a4fa2c57e34e 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -47,7 +47,7 @@ static void reset_boot_trace(struct trace_array *tr) | |||
47 | tracing_reset(tr, cpu); | 47 | tracing_reset(tr, cpu); |
48 | } | 48 | } |
49 | 49 | ||
50 | static void boot_trace_init(struct trace_array *tr) | 50 | static int boot_trace_init(struct trace_array *tr) |
51 | { | 51 | { |
52 | int cpu; | 52 | int cpu; |
53 | boot_trace = tr; | 53 | boot_trace = tr; |
@@ -56,6 +56,7 @@ static void boot_trace_init(struct trace_array *tr) | |||
56 | tracing_reset(tr, cpu); | 56 | tracing_reset(tr, cpu); |
57 | 57 | ||
58 | tracing_sched_switch_assign_trace(tr); | 58 | tracing_sched_switch_assign_trace(tr); |
59 | return 0; | ||
59 | } | 60 | } |
60 | 61 | ||
61 | static enum print_line_t | 62 | static enum print_line_t |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 2511e32572ca..23f9b02ce967 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -125,7 +125,7 @@ static void stop_branch_trace(struct trace_array *tr) | |||
125 | disable_branch_tracing(); | 125 | disable_branch_tracing(); |
126 | } | 126 | } |
127 | 127 | ||
128 | static void branch_trace_init(struct trace_array *tr) | 128 | static int branch_trace_init(struct trace_array *tr) |
129 | { | 129 | { |
130 | int cpu; | 130 | int cpu; |
131 | 131 | ||
@@ -133,6 +133,7 @@ static void branch_trace_init(struct trace_array *tr) | |||
133 | tracing_reset(tr, cpu); | 133 | tracing_reset(tr, cpu); |
134 | 134 | ||
135 | start_branch_trace(tr); | 135 | start_branch_trace(tr); |
136 | return 0; | ||
136 | } | 137 | } |
137 | 138 | ||
138 | static void branch_trace_reset(struct trace_array *tr) | 139 | static void branch_trace_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 8693b7a0a5b2..e74f6d0a3216 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -42,9 +42,10 @@ static void stop_function_trace(struct trace_array *tr) | |||
42 | tracing_stop_cmdline_record(); | 42 | tracing_stop_cmdline_record(); |
43 | } | 43 | } |
44 | 44 | ||
45 | static void function_trace_init(struct trace_array *tr) | 45 | static int function_trace_init(struct trace_array *tr) |
46 | { | 46 | { |
47 | start_function_trace(tr); | 47 | start_function_trace(tr); |
48 | return 0; | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static void function_trace_reset(struct trace_array *tr) | 51 | static void function_trace_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c index 7680b21537dd..a68564af022b 100644 --- a/kernel/trace/trace_functions_return.c +++ b/kernel/trace/trace_functions_return.c | |||
@@ -14,28 +14,18 @@ | |||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | 15 | ||
16 | 16 | ||
17 | static void start_return_trace(struct trace_array *tr) | 17 | static int return_trace_init(struct trace_array *tr) |
18 | { | ||
19 | register_ftrace_return(&trace_function_return); | ||
20 | } | ||
21 | |||
22 | static void stop_return_trace(struct trace_array *tr) | ||
23 | { | ||
24 | unregister_ftrace_return(); | ||
25 | } | ||
26 | |||
27 | static void return_trace_init(struct trace_array *tr) | ||
28 | { | 18 | { |
29 | int cpu; | 19 | int cpu; |
30 | for_each_online_cpu(cpu) | 20 | for_each_online_cpu(cpu) |
31 | tracing_reset(tr, cpu); | 21 | tracing_reset(tr, cpu); |
32 | 22 | ||
33 | start_return_trace(tr); | 23 | return register_ftrace_return(&trace_function_return); |
34 | } | 24 | } |
35 | 25 | ||
36 | static void return_trace_reset(struct trace_array *tr) | 26 | static void return_trace_reset(struct trace_array *tr) |
37 | { | 27 | { |
38 | stop_return_trace(tr); | 28 | unregister_ftrace_return(); |
39 | } | 29 | } |
40 | 30 | ||
41 | 31 | ||
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index d919d4eaa7cc..7c2e326bbc8b 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -416,11 +416,12 @@ static void irqsoff_tracer_close(struct trace_iterator *iter) | |||
416 | } | 416 | } |
417 | 417 | ||
418 | #ifdef CONFIG_IRQSOFF_TRACER | 418 | #ifdef CONFIG_IRQSOFF_TRACER |
419 | static void irqsoff_tracer_init(struct trace_array *tr) | 419 | static int irqsoff_tracer_init(struct trace_array *tr) |
420 | { | 420 | { |
421 | trace_type = TRACER_IRQS_OFF; | 421 | trace_type = TRACER_IRQS_OFF; |
422 | 422 | ||
423 | __irqsoff_tracer_init(tr); | 423 | __irqsoff_tracer_init(tr); |
424 | return 0; | ||
424 | } | 425 | } |
425 | static struct tracer irqsoff_tracer __read_mostly = | 426 | static struct tracer irqsoff_tracer __read_mostly = |
426 | { | 427 | { |
@@ -442,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
442 | #endif | 443 | #endif |
443 | 444 | ||
444 | #ifdef CONFIG_PREEMPT_TRACER | 445 | #ifdef CONFIG_PREEMPT_TRACER |
445 | static void preemptoff_tracer_init(struct trace_array *tr) | 446 | static int preemptoff_tracer_init(struct trace_array *tr) |
446 | { | 447 | { |
447 | trace_type = TRACER_PREEMPT_OFF; | 448 | trace_type = TRACER_PREEMPT_OFF; |
448 | 449 | ||
449 | __irqsoff_tracer_init(tr); | 450 | __irqsoff_tracer_init(tr); |
451 | return 0; | ||
450 | } | 452 | } |
451 | 453 | ||
452 | static struct tracer preemptoff_tracer __read_mostly = | 454 | static struct tracer preemptoff_tracer __read_mostly = |
@@ -471,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
471 | #if defined(CONFIG_IRQSOFF_TRACER) && \ | 473 | #if defined(CONFIG_IRQSOFF_TRACER) && \ |
472 | defined(CONFIG_PREEMPT_TRACER) | 474 | defined(CONFIG_PREEMPT_TRACER) |
473 | 475 | ||
474 | static void preemptirqsoff_tracer_init(struct trace_array *tr) | 476 | static int preemptirqsoff_tracer_init(struct trace_array *tr) |
475 | { | 477 | { |
476 | trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; | 478 | trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; |
477 | 479 | ||
478 | __irqsoff_tracer_init(tr); | 480 | __irqsoff_tracer_init(tr); |
481 | return 0; | ||
479 | } | 482 | } |
480 | 483 | ||
481 | static struct tracer preemptirqsoff_tracer __read_mostly = | 484 | static struct tracer preemptirqsoff_tracer __read_mostly = |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 51bcf370215e..433d650eda9f 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -30,13 +30,14 @@ static void mmio_reset_data(struct trace_array *tr) | |||
30 | tracing_reset(tr, cpu); | 30 | tracing_reset(tr, cpu); |
31 | } | 31 | } |
32 | 32 | ||
33 | static void mmio_trace_init(struct trace_array *tr) | 33 | static int mmio_trace_init(struct trace_array *tr) |
34 | { | 34 | { |
35 | pr_debug("in %s\n", __func__); | 35 | pr_debug("in %s\n", __func__); |
36 | mmio_trace_array = tr; | 36 | mmio_trace_array = tr; |
37 | 37 | ||
38 | mmio_reset_data(tr); | 38 | mmio_reset_data(tr); |
39 | enable_mmiotrace(); | 39 | enable_mmiotrace(); |
40 | return 0; | ||
40 | } | 41 | } |
41 | 42 | ||
42 | static void mmio_trace_reset(struct trace_array *tr) | 43 | static void mmio_trace_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 2ef1d227e7d8..0e77415caed3 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
@@ -24,7 +24,7 @@ static void stop_nop_trace(struct trace_array *tr) | |||
24 | /* Nothing to do! */ | 24 | /* Nothing to do! */ |
25 | } | 25 | } |
26 | 26 | ||
27 | static void nop_trace_init(struct trace_array *tr) | 27 | static int nop_trace_init(struct trace_array *tr) |
28 | { | 28 | { |
29 | int cpu; | 29 | int cpu; |
30 | ctx_trace = tr; | 30 | ctx_trace = tr; |
@@ -33,6 +33,7 @@ static void nop_trace_init(struct trace_array *tr) | |||
33 | tracing_reset(tr, cpu); | 33 | tracing_reset(tr, cpu); |
34 | 34 | ||
35 | start_nop_trace(tr); | 35 | start_nop_trace(tr); |
36 | return 0; | ||
36 | } | 37 | } |
37 | 38 | ||
38 | static void nop_trace_reset(struct trace_array *tr) | 39 | static void nop_trace_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index be35bdfe2e38..863390557b44 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -206,10 +206,11 @@ static void stop_sched_trace(struct trace_array *tr) | |||
206 | tracing_stop_sched_switch_record(); | 206 | tracing_stop_sched_switch_record(); |
207 | } | 207 | } |
208 | 208 | ||
209 | static void sched_switch_trace_init(struct trace_array *tr) | 209 | static int sched_switch_trace_init(struct trace_array *tr) |
210 | { | 210 | { |
211 | ctx_trace = tr; | 211 | ctx_trace = tr; |
212 | start_sched_trace(tr); | 212 | start_sched_trace(tr); |
213 | return 0; | ||
213 | } | 214 | } |
214 | 215 | ||
215 | static void sched_switch_trace_reset(struct trace_array *tr) | 216 | static void sched_switch_trace_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 983f2b1478c9..0067b49746c1 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -331,10 +331,11 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
331 | unregister_trace_sched_wakeup(probe_wakeup); | 331 | unregister_trace_sched_wakeup(probe_wakeup); |
332 | } | 332 | } |
333 | 333 | ||
334 | static void wakeup_tracer_init(struct trace_array *tr) | 334 | static int wakeup_tracer_init(struct trace_array *tr) |
335 | { | 335 | { |
336 | wakeup_trace = tr; | 336 | wakeup_trace = tr; |
337 | start_wakeup_tracer(tr); | 337 | start_wakeup_tracer(tr); |
338 | return 0; | ||
338 | } | 339 | } |
339 | 340 | ||
340 | static void wakeup_tracer_reset(struct trace_array *tr) | 341 | static void wakeup_tracer_reset(struct trace_array *tr) |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 24e6e075e6d6..88c8eb70f54a 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -52,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
52 | int cpu, ret = 0; | 52 | int cpu, ret = 0; |
53 | 53 | ||
54 | /* Don't allow flipping of max traces now */ | 54 | /* Don't allow flipping of max traces now */ |
55 | raw_local_irq_save(flags); | 55 | local_irq_save(flags); |
56 | __raw_spin_lock(&ftrace_max_lock); | 56 | __raw_spin_lock(&ftrace_max_lock); |
57 | 57 | ||
58 | cnt = ring_buffer_entries(tr->buffer); | 58 | cnt = ring_buffer_entries(tr->buffer); |
@@ -63,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
63 | break; | 63 | break; |
64 | } | 64 | } |
65 | __raw_spin_unlock(&ftrace_max_lock); | 65 | __raw_spin_unlock(&ftrace_max_lock); |
66 | raw_local_irq_restore(flags); | 66 | local_irq_restore(flags); |
67 | 67 | ||
68 | if (count) | 68 | if (count) |
69 | *count = cnt; | 69 | *count = cnt; |
@@ -71,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
71 | return ret; | 71 | return ret; |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | ||
75 | { | ||
76 | printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n", | ||
77 | trace->name, init_ret); | ||
78 | } | ||
74 | #ifdef CONFIG_FUNCTION_TRACER | 79 | #ifdef CONFIG_FUNCTION_TRACER |
75 | 80 | ||
76 | #ifdef CONFIG_DYNAMIC_FTRACE | 81 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -111,7 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
111 | ftrace_set_filter(func_name, strlen(func_name), 1); | 116 | ftrace_set_filter(func_name, strlen(func_name), 1); |
112 | 117 | ||
113 | /* enable tracing */ | 118 | /* enable tracing */ |
114 | trace->init(tr); | 119 | ret = trace->init(tr); |
120 | if (ret) { | ||
121 | warn_failed_init_tracer(trace, ret); | ||
122 | goto out; | ||
123 | } | ||
115 | 124 | ||
116 | /* Sleep for a 1/10 of a second */ | 125 | /* Sleep for a 1/10 of a second */ |
117 | msleep(100); | 126 | msleep(100); |
@@ -181,7 +190,12 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
181 | ftrace_enabled = 1; | 190 | ftrace_enabled = 1; |
182 | tracer_enabled = 1; | 191 | tracer_enabled = 1; |
183 | 192 | ||
184 | trace->init(tr); | 193 | ret = trace->init(tr); |
194 | if (ret) { | ||
195 | warn_failed_init_tracer(trace, ret); | ||
196 | goto out; | ||
197 | } | ||
198 | |||
185 | /* Sleep for a 1/10 of a second */ | 199 | /* Sleep for a 1/10 of a second */ |
186 | msleep(100); | 200 | msleep(100); |
187 | /* stop the tracing. */ | 201 | /* stop the tracing. */ |
@@ -223,7 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
223 | int ret; | 237 | int ret; |
224 | 238 | ||
225 | /* start the tracing */ | 239 | /* start the tracing */ |
226 | trace->init(tr); | 240 | ret = trace->init(tr); |
241 | if (ret) { | ||
242 | warn_failed_init_tracer(trace, ret); | ||
243 | return ret; | ||
244 | } | ||
245 | |||
227 | /* reset the max latency */ | 246 | /* reset the max latency */ |
228 | tracing_max_latency = 0; | 247 | tracing_max_latency = 0; |
229 | /* disable interrupts for a bit */ | 248 | /* disable interrupts for a bit */ |
@@ -272,7 +291,12 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
272 | } | 291 | } |
273 | 292 | ||
274 | /* start the tracing */ | 293 | /* start the tracing */ |
275 | trace->init(tr); | 294 | ret = trace->init(tr); |
295 | if (ret) { | ||
296 | warn_failed_init_tracer(trace, ret); | ||
297 | return ret; | ||
298 | } | ||
299 | |||
276 | /* reset the max latency */ | 300 | /* reset the max latency */ |
277 | tracing_max_latency = 0; | 301 | tracing_max_latency = 0; |
278 | /* disable preemption for a bit */ | 302 | /* disable preemption for a bit */ |
@@ -321,7 +345,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
321 | } | 345 | } |
322 | 346 | ||
323 | /* start the tracing */ | 347 | /* start the tracing */ |
324 | trace->init(tr); | 348 | ret = trace->init(tr); |
349 | if (ret) { | ||
350 | warn_failed_init_tracer(trace, ret); | ||
351 | goto out; | ||
352 | } | ||
325 | 353 | ||
326 | /* reset the max latency */ | 354 | /* reset the max latency */ |
327 | tracing_max_latency = 0; | 355 | tracing_max_latency = 0; |
@@ -449,7 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
449 | wait_for_completion(&isrt); | 477 | wait_for_completion(&isrt); |
450 | 478 | ||
451 | /* start the tracing */ | 479 | /* start the tracing */ |
452 | trace->init(tr); | 480 | ret = trace->init(tr); |
481 | if (ret) { | ||
482 | warn_failed_init_tracer(trace, ret); | ||
483 | return ret; | ||
484 | } | ||
485 | |||
453 | /* reset the max latency */ | 486 | /* reset the max latency */ |
454 | tracing_max_latency = 0; | 487 | tracing_max_latency = 0; |
455 | 488 | ||
@@ -505,7 +538,12 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
505 | int ret; | 538 | int ret; |
506 | 539 | ||
507 | /* start the tracing */ | 540 | /* start the tracing */ |
508 | trace->init(tr); | 541 | ret = trace->init(tr); |
542 | if (ret) { | ||
543 | warn_failed_init_tracer(trace, ret); | ||
544 | return ret; | ||
545 | } | ||
546 | |||
509 | /* Sleep for a 1/10 of a second */ | 547 | /* Sleep for a 1/10 of a second */ |
510 | msleep(100); | 548 | msleep(100); |
511 | /* stop the tracing. */ | 549 | /* stop the tracing. */ |
@@ -532,7 +570,12 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | |||
532 | int ret; | 570 | int ret; |
533 | 571 | ||
534 | /* start the tracing */ | 572 | /* start the tracing */ |
535 | trace->init(tr); | 573 | ret = trace->init(tr); |
574 | if (ret) { | ||
575 | warn_failed_init_tracer(trace, ret); | ||
576 | return 0; | ||
577 | } | ||
578 | |||
536 | /* Sleep for a 1/10 of a second */ | 579 | /* Sleep for a 1/10 of a second */ |
537 | msleep(100); | 580 | msleep(100); |
538 | /* stop the tracing. */ | 581 | /* stop the tracing. */ |
@@ -554,7 +597,12 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
554 | int ret; | 597 | int ret; |
555 | 598 | ||
556 | /* start the tracing */ | 599 | /* start the tracing */ |
557 | trace->init(tr); | 600 | ret = trace->init(tr); |
601 | if (ret) { | ||
602 | warn_failed_init_tracer(trace, ret); | ||
603 | return ret; | ||
604 | } | ||
605 | |||
558 | /* Sleep for a 1/10 of a second */ | 606 | /* Sleep for a 1/10 of a second */ |
559 | msleep(100); | 607 | msleep(100); |
560 | /* stop the tracing. */ | 608 | /* stop the tracing. */ |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 05f753422aea..54960edb96d0 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -261,11 +261,12 @@ static void stop_stack_trace(struct trace_array *tr) | |||
261 | mutex_unlock(&sample_timer_lock); | 261 | mutex_unlock(&sample_timer_lock); |
262 | } | 262 | } |
263 | 263 | ||
264 | static void stack_trace_init(struct trace_array *tr) | 264 | static int stack_trace_init(struct trace_array *tr) |
265 | { | 265 | { |
266 | sysprof_trace = tr; | 266 | sysprof_trace = tr; |
267 | 267 | ||
268 | start_stack_trace(tr); | 268 | start_stack_trace(tr); |
269 | return 0; | ||
269 | } | 270 | } |
270 | 271 | ||
271 | static void stack_trace_reset(struct trace_array *tr) | 272 | static void stack_trace_reset(struct trace_array *tr) |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index e96590f17de1..79602740bbb5 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -262,6 +262,7 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
262 | static void disable_tracepoint(struct tracepoint *elem) | 262 | static void disable_tracepoint(struct tracepoint *elem) |
263 | { | 263 | { |
264 | elem->state = 0; | 264 | elem->state = 0; |
265 | rcu_assign_pointer(elem->funcs, NULL); | ||
265 | } | 266 | } |
266 | 267 | ||
267 | /** | 268 | /** |
@@ -540,3 +541,36 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter) | |||
540 | iter->tracepoint = NULL; | 541 | iter->tracepoint = NULL; |
541 | } | 542 | } |
542 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | 543 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); |
544 | |||
545 | #ifdef CONFIG_MODULES | ||
546 | |||
547 | int tracepoint_module_notify(struct notifier_block *self, | ||
548 | unsigned long val, void *data) | ||
549 | { | ||
550 | struct module *mod = data; | ||
551 | |||
552 | switch (val) { | ||
553 | case MODULE_STATE_COMING: | ||
554 | tracepoint_update_probe_range(mod->tracepoints, | ||
555 | mod->tracepoints + mod->num_tracepoints); | ||
556 | break; | ||
557 | case MODULE_STATE_GOING: | ||
558 | tracepoint_update_probe_range(mod->tracepoints, | ||
559 | mod->tracepoints + mod->num_tracepoints); | ||
560 | break; | ||
561 | } | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | struct notifier_block tracepoint_module_nb = { | ||
566 | .notifier_call = tracepoint_module_notify, | ||
567 | .priority = 0, | ||
568 | }; | ||
569 | |||
570 | static int init_tracepoints(void) | ||
571 | { | ||
572 | return register_module_notifier(&tracepoint_module_nb); | ||
573 | } | ||
574 | __initcall(init_tracepoints); | ||
575 | |||
576 | #endif /* CONFIG_MODULES */ | ||
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h index 0216b55bd640..01724e04c556 100644 --- a/samples/tracepoints/tp-samples-trace.h +++ b/samples/tracepoints/tp-samples-trace.h | |||
@@ -4,10 +4,10 @@ | |||
4 | #include <linux/proc_fs.h> /* for struct inode and struct file */ | 4 | #include <linux/proc_fs.h> /* for struct inode and struct file */ |
5 | #include <linux/tracepoint.h> | 5 | #include <linux/tracepoint.h> |
6 | 6 | ||
7 | DEFINE_TRACE(subsys_event, | 7 | DECLARE_TRACE(subsys_event, |
8 | TPPROTO(struct inode *inode, struct file *file), | 8 | TPPROTO(struct inode *inode, struct file *file), |
9 | TPARGS(inode, file)); | 9 | TPARGS(inode, file)); |
10 | DEFINE_TRACE(subsys_eventb, | 10 | DECLARE_TRACE(subsys_eventb, |
11 | TPPROTO(void), | 11 | TPPROTO(void), |
12 | TPARGS()); | 12 | TPARGS()); |
13 | #endif | 13 | #endif |
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c index 55abfdda4bd4..e3a964889dc7 100644 --- a/samples/tracepoints/tracepoint-probe-sample.c +++ b/samples/tracepoints/tracepoint-probe-sample.c | |||
@@ -46,6 +46,7 @@ void __exit tp_sample_trace_exit(void) | |||
46 | { | 46 | { |
47 | unregister_trace_subsys_eventb(probe_subsys_eventb); | 47 | unregister_trace_subsys_eventb(probe_subsys_eventb); |
48 | unregister_trace_subsys_event(probe_subsys_event); | 48 | unregister_trace_subsys_event(probe_subsys_event); |
49 | tracepoint_synchronize_unregister(); | ||
49 | } | 50 | } |
50 | 51 | ||
51 | module_exit(tp_sample_trace_exit); | 52 | module_exit(tp_sample_trace_exit); |
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c index 5e9fcf4afffe..685a5acb4562 100644 --- a/samples/tracepoints/tracepoint-probe-sample2.c +++ b/samples/tracepoints/tracepoint-probe-sample2.c | |||
@@ -33,6 +33,7 @@ module_init(tp_sample_trace_init); | |||
33 | void __exit tp_sample_trace_exit(void) | 33 | void __exit tp_sample_trace_exit(void) |
34 | { | 34 | { |
35 | unregister_trace_subsys_event(probe_subsys_event); | 35 | unregister_trace_subsys_event(probe_subsys_event); |
36 | tracepoint_synchronize_unregister(); | ||
36 | } | 37 | } |
37 | 38 | ||
38 | module_exit(tp_sample_trace_exit); | 39 | module_exit(tp_sample_trace_exit); |
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c index 4ae4b7fcc043..00d169792a3e 100644 --- a/samples/tracepoints/tracepoint-sample.c +++ b/samples/tracepoints/tracepoint-sample.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
14 | #include "tp-samples-trace.h" | 14 | #include "tp-samples-trace.h" |
15 | 15 | ||
16 | DEFINE_TRACE(subsys_event); | ||
17 | DEFINE_TRACE(subsys_eventb); | ||
18 | |||
16 | struct proc_dir_entry *pentry_example; | 19 | struct proc_dir_entry *pentry_example; |
17 | 20 | ||
18 | static int my_open(struct inode *inode, struct file *file) | 21 | static int my_open(struct inode *inode, struct file *file) |