diff options
50 files changed, 1032 insertions, 693 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index b7fa2f599459..45fbea7c329b 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1097,6 +1097,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1097 | that can be changed at run time by the | 1097 | that can be changed at run time by the |
1098 | set_graph_function file in the debugfs tracing directory. | 1098 | set_graph_function file in the debugfs tracing directory. |
1099 | 1099 | ||
1100 | ftrace_graph_notrace=[function-list] | ||
1101 | [FTRACE] Do not trace from the functions specified in | ||
1102 | function-list. This list is a comma separated list of | ||
1103 | functions that can be changed at run time by the | ||
1104 | set_graph_notrace file in the debugfs tracing directory. | ||
1105 | |||
1100 | gamecon.map[2|3]= | 1106 | gamecon.map[2|3]= |
1101 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad | 1107 | [HW,JOY] Multisystem joystick and NES/SNES/PSX pad |
1102 | support via parallel port (up to 5 devices per port) | 1108 | support via parallel port (up to 5 devices per port) |
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt index 3f669b9e8852..dd5f916b351d 100644 --- a/Documentation/trace/ftrace-design.txt +++ b/Documentation/trace/ftrace-design.txt | |||
@@ -102,30 +102,6 @@ extern void mcount(void); | |||
102 | EXPORT_SYMBOL(mcount); | 102 | EXPORT_SYMBOL(mcount); |
103 | 103 | ||
104 | 104 | ||
105 | HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
106 | ------------------------------- | ||
107 | |||
108 | This is an optional optimization for the normal case when tracing is turned off | ||
109 | in the system. If you do not enable this Kconfig option, the common ftrace | ||
110 | code will take care of doing the checking for you. | ||
111 | |||
112 | To support this feature, you only need to check the function_trace_stop | ||
113 | variable in the mcount function. If it is non-zero, there is no tracing to be | ||
114 | done at all, so you can return. | ||
115 | |||
116 | This additional pseudo code would simply be: | ||
117 | void mcount(void) | ||
118 | { | ||
119 | /* save any bare state needed in order to do initial checking */ | ||
120 | |||
121 | + if (function_trace_stop) | ||
122 | + return; | ||
123 | |||
124 | extern void (*ftrace_trace_function)(unsigned long, unsigned long); | ||
125 | if (ftrace_trace_function != ftrace_stub) | ||
126 | ... | ||
127 | |||
128 | |||
129 | HAVE_FUNCTION_GRAPH_TRACER | 105 | HAVE_FUNCTION_GRAPH_TRACER |
130 | -------------------------- | 106 | -------------------------- |
131 | 107 | ||
@@ -328,8 +304,6 @@ void mcount(void) | |||
328 | 304 | ||
329 | void ftrace_caller(void) | 305 | void ftrace_caller(void) |
330 | { | 306 | { |
331 | /* implement HAVE_FUNCTION_TRACE_MCOUNT_TEST if you desire */ | ||
332 | |||
333 | /* save all state needed by the ABI (see paragraph above) */ | 307 | /* save all state needed by the ABI (see paragraph above) */ |
334 | 308 | ||
335 | unsigned long frompc = ...; | 309 | unsigned long frompc = ...; |
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index aa5f9fcbf9ee..38e704e597f7 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S | |||
@@ -96,11 +96,6 @@ | |||
96 | * - ftrace_graph_caller to set up an exit hook | 96 | * - ftrace_graph_caller to set up an exit hook |
97 | */ | 97 | */ |
98 | ENTRY(_mcount) | 98 | ENTRY(_mcount) |
99 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
100 | ldr x0, =ftrace_trace_stop | ||
101 | ldr x0, [x0] // if ftrace_trace_stop | ||
102 | ret // return; | ||
103 | #endif | ||
104 | mcount_enter | 99 | mcount_enter |
105 | 100 | ||
106 | ldr x0, =ftrace_trace_function | 101 | ldr x0, =ftrace_trace_function |
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index f81e7b989fff..ed30699cc635 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig | |||
@@ -18,7 +18,6 @@ config BLACKFIN | |||
18 | select HAVE_FTRACE_MCOUNT_RECORD | 18 | select HAVE_FTRACE_MCOUNT_RECORD |
19 | select HAVE_FUNCTION_GRAPH_TRACER | 19 | select HAVE_FUNCTION_GRAPH_TRACER |
20 | select HAVE_FUNCTION_TRACER | 20 | select HAVE_FUNCTION_TRACER |
21 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
22 | select HAVE_IDE | 21 | select HAVE_IDE |
23 | select HAVE_KERNEL_GZIP if RAMKERNEL | 22 | select HAVE_KERNEL_GZIP if RAMKERNEL |
24 | select HAVE_KERNEL_BZIP2 if RAMKERNEL | 23 | select HAVE_KERNEL_BZIP2 if RAMKERNEL |
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index 7eed00bbd26d..28d059540424 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S | |||
@@ -33,15 +33,6 @@ ENDPROC(__mcount) | |||
33 | * function will be waiting there. mmmm pie. | 33 | * function will be waiting there. mmmm pie. |
34 | */ | 34 | */ |
35 | ENTRY(_ftrace_caller) | 35 | ENTRY(_ftrace_caller) |
36 | # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
37 | /* optional micro optimization: return if stopped */ | ||
38 | p1.l = _function_trace_stop; | ||
39 | p1.h = _function_trace_stop; | ||
40 | r3 = [p1]; | ||
41 | cc = r3 == 0; | ||
42 | if ! cc jump _ftrace_stub (bp); | ||
43 | # endif | ||
44 | |||
45 | /* save first/second/third function arg and the return register */ | 36 | /* save first/second/third function arg and the return register */ |
46 | [--sp] = r2; | 37 | [--sp] = r2; |
47 | [--sp] = r0; | 38 | [--sp] = r0; |
@@ -83,15 +74,6 @@ ENDPROC(_ftrace_caller) | |||
83 | 74 | ||
84 | /* See documentation for _ftrace_caller */ | 75 | /* See documentation for _ftrace_caller */ |
85 | ENTRY(__mcount) | 76 | ENTRY(__mcount) |
86 | # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
87 | /* optional micro optimization: return if stopped */ | ||
88 | p1.l = _function_trace_stop; | ||
89 | p1.h = _function_trace_stop; | ||
90 | r3 = [p1]; | ||
91 | cc = r3 == 0; | ||
92 | if ! cc jump _ftrace_stub (bp); | ||
93 | # endif | ||
94 | |||
95 | /* save third function arg early so we can do testing below */ | 77 | /* save third function arg early so we can do testing below */ |
96 | [--sp] = r2; | 78 | [--sp] = r2; |
97 | 79 | ||
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 499b7610eaaf..0b389a81c43a 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig | |||
@@ -13,7 +13,6 @@ config METAG | |||
13 | select HAVE_DYNAMIC_FTRACE | 13 | select HAVE_DYNAMIC_FTRACE |
14 | select HAVE_FTRACE_MCOUNT_RECORD | 14 | select HAVE_FTRACE_MCOUNT_RECORD |
15 | select HAVE_FUNCTION_TRACER | 15 | select HAVE_FUNCTION_TRACER |
16 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
17 | select HAVE_KERNEL_BZIP2 | 16 | select HAVE_KERNEL_BZIP2 |
18 | select HAVE_KERNEL_GZIP | 17 | select HAVE_KERNEL_GZIP |
19 | select HAVE_KERNEL_LZO | 18 | select HAVE_KERNEL_LZO |
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S index e70bff745bdd..3acc288217c0 100644 --- a/arch/metag/kernel/ftrace_stub.S +++ b/arch/metag/kernel/ftrace_stub.S | |||
@@ -16,13 +16,6 @@ _mcount_wrapper: | |||
16 | .global _ftrace_caller | 16 | .global _ftrace_caller |
17 | .type _ftrace_caller,function | 17 | .type _ftrace_caller,function |
18 | _ftrace_caller: | 18 | _ftrace_caller: |
19 | MOVT D0Re0,#HI(_function_trace_stop) | ||
20 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
21 | GETD D0Re0,[D0Re0] | ||
22 | CMP D0Re0,#0 | ||
23 | BEQ $Lcall_stub | ||
24 | MOV PC,D0.4 | ||
25 | $Lcall_stub: | ||
26 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | 19 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 |
27 | MOV D1Ar1, D0.4 | 20 | MOV D1Ar1, D0.4 |
28 | MOV D0Ar2, D1RtP | 21 | MOV D0Ar2, D1RtP |
@@ -42,13 +35,6 @@ _ftrace_call: | |||
42 | .global _mcount_wrapper | 35 | .global _mcount_wrapper |
43 | .type _mcount_wrapper,function | 36 | .type _mcount_wrapper,function |
44 | _mcount_wrapper: | 37 | _mcount_wrapper: |
45 | MOVT D0Re0,#HI(_function_trace_stop) | ||
46 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
47 | GETD D0Re0,[D0Re0] | ||
48 | CMP D0Re0,#0 | ||
49 | BEQ $Lcall_mcount | ||
50 | MOV PC,D0.4 | ||
51 | $Lcall_mcount: | ||
52 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | 38 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 |
53 | MOV D1Ar1, D0.4 | 39 | MOV D1Ar1, D0.4 |
54 | MOV D0Ar2, D1RtP | 40 | MOV D0Ar2, D1RtP |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 9ae08541e30d..40e1c1dd0e24 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -22,7 +22,6 @@ config MICROBLAZE | |||
22 | select HAVE_DYNAMIC_FTRACE | 22 | select HAVE_DYNAMIC_FTRACE |
23 | select HAVE_FTRACE_MCOUNT_RECORD | 23 | select HAVE_FTRACE_MCOUNT_RECORD |
24 | select HAVE_FUNCTION_GRAPH_TRACER | 24 | select HAVE_FUNCTION_GRAPH_TRACER |
25 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
26 | select HAVE_FUNCTION_TRACER | 25 | select HAVE_FUNCTION_TRACER |
27 | select HAVE_MEMBLOCK | 26 | select HAVE_MEMBLOCK |
28 | select HAVE_MEMBLOCK_NODE_MAP | 27 | select HAVE_MEMBLOCK_NODE_MAP |
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index bbcd2533766c..fc7b48a52cd5 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c | |||
@@ -27,6 +27,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
27 | unsigned long return_hooker = (unsigned long) | 27 | unsigned long return_hooker = (unsigned long) |
28 | &return_to_handler; | 28 | &return_to_handler; |
29 | 29 | ||
30 | if (unlikely(ftrace_graph_is_dead())) | ||
31 | return; | ||
32 | |||
30 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 33 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
31 | return; | 34 | return; |
32 | 35 | ||
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S index fc1e1322ce4c..fed9da5de8c4 100644 --- a/arch/microblaze/kernel/mcount.S +++ b/arch/microblaze/kernel/mcount.S | |||
@@ -91,11 +91,6 @@ ENTRY(ftrace_caller) | |||
91 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 91 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
92 | SAVE_REGS | 92 | SAVE_REGS |
93 | swi r15, r1, 0; | 93 | swi r15, r1, 0; |
94 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */ | ||
95 | lwi r5, r0, function_trace_stop; | ||
96 | bneid r5, end; | ||
97 | nop; | ||
98 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */ | ||
99 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 94 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
100 | #ifndef CONFIG_DYNAMIC_FTRACE | 95 | #ifndef CONFIG_DYNAMIC_FTRACE |
101 | lwi r5, r0, ftrace_graph_return; | 96 | lwi r5, r0, ftrace_graph_return; |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4e238e6e661c..10f270bd3e25 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -15,7 +15,6 @@ config MIPS | |||
15 | select HAVE_BPF_JIT if !CPU_MICROMIPS | 15 | select HAVE_BPF_JIT if !CPU_MICROMIPS |
16 | select ARCH_HAVE_CUSTOM_GPIO_H | 16 | select ARCH_HAVE_CUSTOM_GPIO_H |
17 | select HAVE_FUNCTION_TRACER | 17 | select HAVE_FUNCTION_TRACER |
18 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
19 | select HAVE_DYNAMIC_FTRACE | 18 | select HAVE_DYNAMIC_FTRACE |
20 | select HAVE_FTRACE_MCOUNT_RECORD | 19 | select HAVE_FTRACE_MCOUNT_RECORD |
21 | select HAVE_C_RECORDMCOUNT | 20 | select HAVE_C_RECORDMCOUNT |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 60e7e5e45af1..8b6538750fe1 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -302,6 +302,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
302 | &return_to_handler; | 302 | &return_to_handler; |
303 | int faulted, insns; | 303 | int faulted, insns; |
304 | 304 | ||
305 | if (unlikely(ftrace_graph_is_dead())) | ||
306 | return; | ||
307 | |||
305 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 308 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
306 | return; | 309 | return; |
307 | 310 | ||
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 539b6294b613..00940d1d5c4f 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
@@ -74,10 +74,6 @@ _mcount: | |||
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ | 76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ |
77 | lw t1, function_trace_stop | ||
78 | bnez t1, ftrace_stub | ||
79 | nop | ||
80 | |||
81 | MCOUNT_SAVE_REGS | 77 | MCOUNT_SAVE_REGS |
82 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | 78 | #ifdef KBUILD_MCOUNT_RA_ADDRESS |
83 | PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) | 79 | PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) |
@@ -105,9 +101,6 @@ ftrace_stub: | |||
105 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 101 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
106 | 102 | ||
107 | NESTED(_mcount, PT_SIZE, ra) | 103 | NESTED(_mcount, PT_SIZE, ra) |
108 | lw t1, function_trace_stop | ||
109 | bnez t1, ftrace_stub | ||
110 | nop | ||
111 | PTR_LA t1, ftrace_stub | 104 | PTR_LA t1, ftrace_stub |
112 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ | 105 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ |
113 | bne t1, t2, static_trace | 106 | bne t1, t2, static_trace |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 108d48e652af..6e75e2030927 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -6,7 +6,6 @@ config PARISC | |||
6 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
7 | select HAVE_FUNCTION_TRACER if 64BIT | 7 | select HAVE_FUNCTION_TRACER if 64BIT |
8 | select HAVE_FUNCTION_GRAPH_TRACER if 64BIT | 8 | select HAVE_FUNCTION_GRAPH_TRACER if 64BIT |
9 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT | ||
10 | select ARCH_WANT_FRAME_POINTERS | 9 | select ARCH_WANT_FRAME_POINTERS |
11 | select RTC_CLASS | 10 | select RTC_CLASS |
12 | select RTC_DRV_GENERIC | 11 | select RTC_DRV_GENERIC |
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 5beb97bafbb1..559d400f9385 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c | |||
@@ -112,6 +112,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
112 | unsigned long long calltime; | 112 | unsigned long long calltime; |
113 | struct ftrace_graph_ent trace; | 113 | struct ftrace_graph_ent trace; |
114 | 114 | ||
115 | if (unlikely(ftrace_graph_is_dead())) | ||
116 | return; | ||
117 | |||
115 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 118 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
116 | return; | 119 | return; |
117 | 120 | ||
@@ -152,9 +155,6 @@ void ftrace_function_trampoline(unsigned long parent, | |||
152 | { | 155 | { |
153 | extern ftrace_func_t ftrace_trace_function; | 156 | extern ftrace_func_t ftrace_trace_function; |
154 | 157 | ||
155 | if (function_trace_stop) | ||
156 | return; | ||
157 | |||
158 | if (ftrace_trace_function != ftrace_stub) { | 158 | if (ftrace_trace_function != ftrace_stub) { |
159 | ftrace_trace_function(parent, self_addr); | 159 | ftrace_trace_function(parent, self_addr); |
160 | return; | 160 | return; |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index d178834fe508..390311c0f03d 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -525,6 +525,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
525 | struct ftrace_graph_ent trace; | 525 | struct ftrace_graph_ent trace; |
526 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 526 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
527 | 527 | ||
528 | if (unlikely(ftrace_graph_is_dead())) | ||
529 | return; | ||
530 | |||
528 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 531 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
529 | return; | 532 | return; |
530 | 533 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bb63499fc5d3..f5af5f6ef0f4 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -116,7 +116,6 @@ config S390 | |||
116 | select HAVE_FTRACE_MCOUNT_RECORD | 116 | select HAVE_FTRACE_MCOUNT_RECORD |
117 | select HAVE_FUNCTION_GRAPH_TRACER | 117 | select HAVE_FUNCTION_GRAPH_TRACER |
118 | select HAVE_FUNCTION_TRACER | 118 | select HAVE_FUNCTION_TRACER |
119 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
120 | select HAVE_FUTEX_CMPXCHG if FUTEX | 119 | select HAVE_FUTEX_CMPXCHG if FUTEX |
121 | select HAVE_KERNEL_BZIP2 | 120 | select HAVE_KERNEL_BZIP2 |
122 | select HAVE_KERNEL_GZIP | 121 | select HAVE_KERNEL_GZIP |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 08dcf21cb8df..433c6dbfa442 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -21,13 +21,9 @@ ENTRY(_mcount) | |||
21 | ENTRY(ftrace_caller) | 21 | ENTRY(ftrace_caller) |
22 | #endif | 22 | #endif |
23 | stm %r2,%r5,16(%r15) | 23 | stm %r2,%r5,16(%r15) |
24 | bras %r1,2f | 24 | bras %r1,1f |
25 | 0: .long ftrace_trace_function | 25 | 0: .long ftrace_trace_function |
26 | 1: .long function_trace_stop | 26 | 1: st %r14,56(%r15) |
27 | 2: l %r2,1b-0b(%r1) | ||
28 | icm %r2,0xf,0(%r2) | ||
29 | jnz 3f | ||
30 | st %r14,56(%r15) | ||
31 | lr %r0,%r15 | 27 | lr %r0,%r15 |
32 | ahi %r15,-96 | 28 | ahi %r15,-96 |
33 | l %r3,100(%r15) | 29 | l %r3,100(%r15) |
@@ -50,7 +46,7 @@ ENTRY(ftrace_graph_caller) | |||
50 | #endif | 46 | #endif |
51 | ahi %r15,96 | 47 | ahi %r15,96 |
52 | l %r14,56(%r15) | 48 | l %r14,56(%r15) |
53 | 3: lm %r2,%r5,16(%r15) | 49 | lm %r2,%r5,16(%r15) |
54 | br %r14 | 50 | br %r14 |
55 | 51 | ||
56 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 52 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S index 1c52eae3396a..c67a8bf0fd9a 100644 --- a/arch/s390/kernel/mcount64.S +++ b/arch/s390/kernel/mcount64.S | |||
@@ -20,9 +20,6 @@ ENTRY(_mcount) | |||
20 | 20 | ||
21 | ENTRY(ftrace_caller) | 21 | ENTRY(ftrace_caller) |
22 | #endif | 22 | #endif |
23 | larl %r1,function_trace_stop | ||
24 | icm %r1,0xf,0(%r1) | ||
25 | bnzr %r14 | ||
26 | stmg %r2,%r5,32(%r15) | 23 | stmg %r2,%r5,32(%r15) |
27 | stg %r14,112(%r15) | 24 | stg %r14,112(%r15) |
28 | lgr %r1,%r15 | 25 | lgr %r1,%r15 |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 834b67c4db5a..aa2df3eaeb29 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -57,7 +57,6 @@ config SUPERH32 | |||
57 | select HAVE_FUNCTION_TRACER | 57 | select HAVE_FUNCTION_TRACER |
58 | select HAVE_FTRACE_MCOUNT_RECORD | 58 | select HAVE_FTRACE_MCOUNT_RECORD |
59 | select HAVE_DYNAMIC_FTRACE | 59 | select HAVE_DYNAMIC_FTRACE |
60 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
61 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE | 60 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE |
62 | select ARCH_WANT_IPC_PARSE_VERSION | 61 | select ARCH_WANT_IPC_PARSE_VERSION |
63 | select HAVE_FUNCTION_GRAPH_TRACER | 62 | select HAVE_FUNCTION_GRAPH_TRACER |
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 3c74f53db6db..079d70e6d74b 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c | |||
@@ -344,6 +344,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
344 | struct ftrace_graph_ent trace; | 344 | struct ftrace_graph_ent trace; |
345 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 345 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
346 | 346 | ||
347 | if (unlikely(ftrace_graph_is_dead())) | ||
348 | return; | ||
349 | |||
347 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 350 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
348 | return; | 351 | return; |
349 | 352 | ||
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 52aa2011d753..7a8572f9d58b 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S | |||
@@ -92,13 +92,6 @@ mcount: | |||
92 | rts | 92 | rts |
93 | nop | 93 | nop |
94 | #else | 94 | #else |
95 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
96 | mov.l .Lfunction_trace_stop, r0 | ||
97 | mov.l @r0, r0 | ||
98 | tst r0, r0 | ||
99 | bf ftrace_stub | ||
100 | #endif | ||
101 | |||
102 | MCOUNT_ENTER() | 95 | MCOUNT_ENTER() |
103 | 96 | ||
104 | #ifdef CONFIG_DYNAMIC_FTRACE | 97 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -174,11 +167,6 @@ ftrace_graph_call: | |||
174 | 167 | ||
175 | .globl ftrace_caller | 168 | .globl ftrace_caller |
176 | ftrace_caller: | 169 | ftrace_caller: |
177 | mov.l .Lfunction_trace_stop, r0 | ||
178 | mov.l @r0, r0 | ||
179 | tst r0, r0 | ||
180 | bf ftrace_stub | ||
181 | |||
182 | MCOUNT_ENTER() | 170 | MCOUNT_ENTER() |
183 | 171 | ||
184 | .globl ftrace_call | 172 | .globl ftrace_call |
@@ -196,8 +184,6 @@ ftrace_call: | |||
196 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 184 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
197 | 185 | ||
198 | .align 2 | 186 | .align 2 |
199 | .Lfunction_trace_stop: | ||
200 | .long function_trace_stop | ||
201 | 187 | ||
202 | /* | 188 | /* |
203 | * NOTE: From here on the locations of the .Lftrace_stub label and | 189 | * NOTE: From here on the locations of the .Lftrace_stub label and |
@@ -217,12 +203,7 @@ ftrace_stub: | |||
217 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 203 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
218 | .globl ftrace_graph_caller | 204 | .globl ftrace_graph_caller |
219 | ftrace_graph_caller: | 205 | ftrace_graph_caller: |
220 | mov.l 2f, r0 | 206 | mov.l 2f, r1 |
221 | mov.l @r0, r0 | ||
222 | tst r0, r0 | ||
223 | bt 1f | ||
224 | |||
225 | mov.l 3f, r1 | ||
226 | jmp @r1 | 207 | jmp @r1 |
227 | nop | 208 | nop |
228 | 1: | 209 | 1: |
@@ -242,8 +223,7 @@ ftrace_graph_caller: | |||
242 | MCOUNT_LEAVE() | 223 | MCOUNT_LEAVE() |
243 | 224 | ||
244 | .align 2 | 225 | .align 2 |
245 | 2: .long function_trace_stop | 226 | 2: .long skip_trace |
246 | 3: .long skip_trace | ||
247 | .Lprepare_ftrace_return: | 227 | .Lprepare_ftrace_return: |
248 | .long prepare_ftrace_return | 228 | .long prepare_ftrace_return |
249 | 229 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 407c87d9879a..4692c90936f1 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -55,7 +55,6 @@ config SPARC64 | |||
55 | select HAVE_FUNCTION_TRACER | 55 | select HAVE_FUNCTION_TRACER |
56 | select HAVE_FUNCTION_GRAPH_TRACER | 56 | select HAVE_FUNCTION_GRAPH_TRACER |
57 | select HAVE_FUNCTION_GRAPH_FP_TEST | 57 | select HAVE_FUNCTION_GRAPH_FP_TEST |
58 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
59 | select HAVE_KRETPROBES | 58 | select HAVE_KRETPROBES |
60 | select HAVE_KPROBES | 59 | select HAVE_KPROBES |
61 | select HAVE_RCU_TABLE_FREE if SMP | 60 | select HAVE_RCU_TABLE_FREE if SMP |
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 3ad6cbdc2163..0b0ed4d34219 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S | |||
@@ -24,10 +24,7 @@ mcount: | |||
24 | #ifdef CONFIG_DYNAMIC_FTRACE | 24 | #ifdef CONFIG_DYNAMIC_FTRACE |
25 | /* Do nothing, the retl/nop below is all we need. */ | 25 | /* Do nothing, the retl/nop below is all we need. */ |
26 | #else | 26 | #else |
27 | sethi %hi(function_trace_stop), %g1 | 27 | sethi %hi(ftrace_trace_function), %g1 |
28 | lduw [%g1 + %lo(function_trace_stop)], %g2 | ||
29 | brnz,pn %g2, 2f | ||
30 | sethi %hi(ftrace_trace_function), %g1 | ||
31 | sethi %hi(ftrace_stub), %g2 | 28 | sethi %hi(ftrace_stub), %g2 |
32 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 | 29 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 |
33 | or %g2, %lo(ftrace_stub), %g2 | 30 | or %g2, %lo(ftrace_stub), %g2 |
@@ -80,11 +77,8 @@ ftrace_stub: | |||
80 | .globl ftrace_caller | 77 | .globl ftrace_caller |
81 | .type ftrace_caller,#function | 78 | .type ftrace_caller,#function |
82 | ftrace_caller: | 79 | ftrace_caller: |
83 | sethi %hi(function_trace_stop), %g1 | ||
84 | mov %i7, %g2 | 80 | mov %i7, %g2 |
85 | lduw [%g1 + %lo(function_trace_stop)], %g1 | 81 | mov %fp, %g3 |
86 | brnz,pn %g1, ftrace_stub | ||
87 | mov %fp, %g3 | ||
88 | save %sp, -176, %sp | 82 | save %sp, -176, %sp |
89 | mov %g2, %o1 | 83 | mov %g2, %o1 |
90 | mov %g2, %l0 | 84 | mov %g2, %l0 |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 4f3006b600e3..7fcd492adbfc 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -128,7 +128,6 @@ config TILEGX | |||
128 | select SPARSE_IRQ | 128 | select SPARSE_IRQ |
129 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | 129 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ |
130 | select HAVE_FUNCTION_TRACER | 130 | select HAVE_FUNCTION_TRACER |
131 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
132 | select HAVE_FUNCTION_GRAPH_TRACER | 131 | select HAVE_FUNCTION_GRAPH_TRACER |
133 | select HAVE_DYNAMIC_FTRACE | 132 | select HAVE_DYNAMIC_FTRACE |
134 | select HAVE_FTRACE_MCOUNT_RECORD | 133 | select HAVE_FTRACE_MCOUNT_RECORD |
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S index 70d7bb0c4d8f..3c2b8d5e1d1a 100644 --- a/arch/tile/kernel/mcount_64.S +++ b/arch/tile/kernel/mcount_64.S | |||
@@ -77,15 +77,6 @@ STD_ENDPROC(__mcount) | |||
77 | 77 | ||
78 | .align 64 | 78 | .align 64 |
79 | STD_ENTRY(ftrace_caller) | 79 | STD_ENTRY(ftrace_caller) |
80 | moveli r11, hw2_last(function_trace_stop) | ||
81 | { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } | ||
82 | { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } | ||
83 | ld r11, r11 | ||
84 | beqz r11, 1f | ||
85 | jrp r12 | ||
86 | |||
87 | 1: | ||
88 | { move r10, lr; move lr, r12 } | ||
89 | MCOUNT_SAVE_REGS | 80 | MCOUNT_SAVE_REGS |
90 | 81 | ||
91 | /* arg1: self return address */ | 82 | /* arg1: self return address */ |
@@ -119,15 +110,6 @@ STD_ENDPROC(ftrace_caller) | |||
119 | 110 | ||
120 | .align 64 | 111 | .align 64 |
121 | STD_ENTRY(__mcount) | 112 | STD_ENTRY(__mcount) |
122 | moveli r11, hw2_last(function_trace_stop) | ||
123 | { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } | ||
124 | { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } | ||
125 | ld r11, r11 | ||
126 | beqz r11, 1f | ||
127 | jrp r12 | ||
128 | |||
129 | 1: | ||
130 | { move r10, lr; move lr, r12 } | ||
131 | { | 113 | { |
132 | moveli r11, hw2_last(ftrace_trace_function) | 114 | moveli r11, hw2_last(ftrace_trace_function) |
133 | moveli r13, hw2_last(ftrace_stub) | 115 | moveli r13, hw2_last(ftrace_stub) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d24887b645dc..2840c27d4479 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -54,7 +54,6 @@ config X86 | |||
54 | select HAVE_FUNCTION_TRACER | 54 | select HAVE_FUNCTION_TRACER |
55 | select HAVE_FUNCTION_GRAPH_TRACER | 55 | select HAVE_FUNCTION_GRAPH_TRACER |
56 | select HAVE_FUNCTION_GRAPH_FP_TEST | 56 | select HAVE_FUNCTION_GRAPH_FP_TEST |
57 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
58 | select HAVE_SYSCALL_TRACEPOINTS | 57 | select HAVE_SYSCALL_TRACEPOINTS |
59 | select SYSCTL_EXCEPTION_TRACE | 58 | select SYSCTL_EXCEPTION_TRACE |
60 | select HAVE_KVM | 59 | select HAVE_KVM |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 0525a8bdf65d..e1f7fecaa7d6 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -68,6 +68,8 @@ struct dyn_arch_ftrace { | |||
68 | 68 | ||
69 | int ftrace_int3_handler(struct pt_regs *regs); | 69 | int ftrace_int3_handler(struct pt_regs *regs); |
70 | 70 | ||
71 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR | ||
72 | |||
71 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 73 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
72 | #endif /* __ASSEMBLY__ */ | 74 | #endif /* __ASSEMBLY__ */ |
73 | #endif /* CONFIG_FUNCTION_TRACER */ | 75 | #endif /* CONFIG_FUNCTION_TRACER */ |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 0d0c9d4ab6d5..47c410d99f5d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1059,9 +1059,6 @@ ENTRY(mcount) | |||
1059 | END(mcount) | 1059 | END(mcount) |
1060 | 1060 | ||
1061 | ENTRY(ftrace_caller) | 1061 | ENTRY(ftrace_caller) |
1062 | cmpl $0, function_trace_stop | ||
1063 | jne ftrace_stub | ||
1064 | |||
1065 | pushl %eax | 1062 | pushl %eax |
1066 | pushl %ecx | 1063 | pushl %ecx |
1067 | pushl %edx | 1064 | pushl %edx |
@@ -1093,8 +1090,6 @@ END(ftrace_caller) | |||
1093 | 1090 | ||
1094 | ENTRY(ftrace_regs_caller) | 1091 | ENTRY(ftrace_regs_caller) |
1095 | pushf /* push flags before compare (in cs location) */ | 1092 | pushf /* push flags before compare (in cs location) */ |
1096 | cmpl $0, function_trace_stop | ||
1097 | jne ftrace_restore_flags | ||
1098 | 1093 | ||
1099 | /* | 1094 | /* |
1100 | * i386 does not save SS and ESP when coming from kernel. | 1095 | * i386 does not save SS and ESP when coming from kernel. |
@@ -1153,7 +1148,6 @@ GLOBAL(ftrace_regs_call) | |||
1153 | popf /* Pop flags at end (no addl to corrupt flags) */ | 1148 | popf /* Pop flags at end (no addl to corrupt flags) */ |
1154 | jmp ftrace_ret | 1149 | jmp ftrace_ret |
1155 | 1150 | ||
1156 | ftrace_restore_flags: | ||
1157 | popf | 1151 | popf |
1158 | jmp ftrace_stub | 1152 | jmp ftrace_stub |
1159 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 1153 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
@@ -1162,9 +1156,6 @@ ENTRY(mcount) | |||
1162 | cmpl $__PAGE_OFFSET, %esp | 1156 | cmpl $__PAGE_OFFSET, %esp |
1163 | jb ftrace_stub /* Paging not enabled yet? */ | 1157 | jb ftrace_stub /* Paging not enabled yet? */ |
1164 | 1158 | ||
1165 | cmpl $0, function_trace_stop | ||
1166 | jne ftrace_stub | ||
1167 | |||
1168 | cmpl $ftrace_stub, ftrace_trace_function | 1159 | cmpl $ftrace_stub, ftrace_trace_function |
1169 | jnz trace | 1160 | jnz trace |
1170 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1161 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cbc4a91b131e..3386dc9aa333 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -703,6 +703,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
703 | unsigned long return_hooker = (unsigned long) | 703 | unsigned long return_hooker = (unsigned long) |
704 | &return_to_handler; | 704 | &return_to_handler; |
705 | 705 | ||
706 | if (unlikely(ftrace_graph_is_dead())) | ||
707 | return; | ||
708 | |||
706 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 709 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
707 | return; | 710 | return; |
708 | 711 | ||
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index c050a0153168..c73aecf10d34 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
@@ -46,10 +46,6 @@ END(function_hook) | |||
46 | .endm | 46 | .endm |
47 | 47 | ||
48 | ENTRY(ftrace_caller) | 48 | ENTRY(ftrace_caller) |
49 | /* Check if tracing was disabled (quick check) */ | ||
50 | cmpl $0, function_trace_stop | ||
51 | jne ftrace_stub | ||
52 | |||
53 | ftrace_caller_setup | 49 | ftrace_caller_setup |
54 | /* regs go into 4th parameter (but make it NULL) */ | 50 | /* regs go into 4th parameter (but make it NULL) */ |
55 | movq $0, %rcx | 51 | movq $0, %rcx |
@@ -73,10 +69,6 @@ ENTRY(ftrace_regs_caller) | |||
73 | /* Save the current flags before compare (in SS location)*/ | 69 | /* Save the current flags before compare (in SS location)*/ |
74 | pushfq | 70 | pushfq |
75 | 71 | ||
76 | /* Check if tracing was disabled (quick check) */ | ||
77 | cmpl $0, function_trace_stop | ||
78 | jne ftrace_restore_flags | ||
79 | |||
80 | /* skip=8 to skip flags saved in SS */ | 72 | /* skip=8 to skip flags saved in SS */ |
81 | ftrace_caller_setup 8 | 73 | ftrace_caller_setup 8 |
82 | 74 | ||
@@ -131,7 +123,7 @@ GLOBAL(ftrace_regs_call) | |||
131 | popfq | 123 | popfq |
132 | 124 | ||
133 | jmp ftrace_return | 125 | jmp ftrace_return |
134 | ftrace_restore_flags: | 126 | |
135 | popfq | 127 | popfq |
136 | jmp ftrace_stub | 128 | jmp ftrace_stub |
137 | 129 | ||
@@ -141,9 +133,6 @@ END(ftrace_regs_caller) | |||
141 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 133 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
142 | 134 | ||
143 | ENTRY(function_hook) | 135 | ENTRY(function_hook) |
144 | cmpl $0, function_trace_stop | ||
145 | jne ftrace_stub | ||
146 | |||
147 | cmpq $ftrace_stub, ftrace_trace_function | 136 | cmpq $ftrace_stub, ftrace_trace_function |
148 | jnz trace | 137 | jnz trace |
149 | 138 | ||
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 9d2e0ffcb190..2e5652b62fd6 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h | |||
@@ -22,7 +22,7 @@ | |||
22 | __entry->unsync = sp->unsync; | 22 | __entry->unsync = sp->unsync; |
23 | 23 | ||
24 | #define KVM_MMU_PAGE_PRINTK() ({ \ | 24 | #define KVM_MMU_PAGE_PRINTK() ({ \ |
25 | const char *ret = p->buffer + p->len; \ | 25 | const char *ret = trace_seq_buffer_ptr(p); \ |
26 | static const char *access_str[] = { \ | 26 | static const char *access_str[] = { \ |
27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ | 27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ |
28 | }; \ | 28 | }; \ |
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 424f4c97a44d..6ec7910f59bf 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
@@ -165,7 +165,7 @@ static void fix_processor_context(void) | |||
165 | * by __save_processor_state() | 165 | * by __save_processor_state() |
166 | * @ctxt - structure to load the registers contents from | 166 | * @ctxt - structure to load the registers contents from |
167 | */ | 167 | */ |
168 | static void __restore_processor_state(struct saved_context *ctxt) | 168 | static void notrace __restore_processor_state(struct saved_context *ctxt) |
169 | { | 169 | { |
170 | if (ctxt->misc_enable_saved) | 170 | if (ctxt->misc_enable_saved) |
171 | wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); | 171 | wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); |
@@ -239,7 +239,7 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | /* Needed by apm.c */ | 241 | /* Needed by apm.c */ |
242 | void restore_processor_state(void) | 242 | void notrace restore_processor_state(void) |
243 | { | 243 | { |
244 | __restore_processor_state(&saved_context); | 244 | __restore_processor_state(&saved_context); |
245 | } | 245 | } |
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 2bea4f0b684a..503594e5f76d 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c | |||
@@ -28,7 +28,7 @@ scsi_trace_misc(struct trace_seq *, unsigned char *, int); | |||
28 | static const char * | 28 | static const char * |
29 | scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) | 29 | scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) |
30 | { | 30 | { |
31 | const char *ret = p->buffer + p->len; | 31 | const char *ret = trace_seq_buffer_ptr(p); |
32 | sector_t lba = 0, txlen = 0; | 32 | sector_t lba = 0, txlen = 0; |
33 | 33 | ||
34 | lba |= ((cdb[1] & 0x1F) << 16); | 34 | lba |= ((cdb[1] & 0x1F) << 16); |
@@ -46,7 +46,7 @@ scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) | |||
46 | static const char * | 46 | static const char * |
47 | scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) | 47 | scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) |
48 | { | 48 | { |
49 | const char *ret = p->buffer + p->len; | 49 | const char *ret = trace_seq_buffer_ptr(p); |
50 | sector_t lba = 0, txlen = 0; | 50 | sector_t lba = 0, txlen = 0; |
51 | 51 | ||
52 | lba |= (cdb[2] << 24); | 52 | lba |= (cdb[2] << 24); |
@@ -71,7 +71,7 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) | |||
71 | static const char * | 71 | static const char * |
72 | scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) | 72 | scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) |
73 | { | 73 | { |
74 | const char *ret = p->buffer + p->len; | 74 | const char *ret = trace_seq_buffer_ptr(p); |
75 | sector_t lba = 0, txlen = 0; | 75 | sector_t lba = 0, txlen = 0; |
76 | 76 | ||
77 | lba |= (cdb[2] << 24); | 77 | lba |= (cdb[2] << 24); |
@@ -94,7 +94,7 @@ scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) | |||
94 | static const char * | 94 | static const char * |
95 | scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) | 95 | scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) |
96 | { | 96 | { |
97 | const char *ret = p->buffer + p->len; | 97 | const char *ret = trace_seq_buffer_ptr(p); |
98 | sector_t lba = 0, txlen = 0; | 98 | sector_t lba = 0, txlen = 0; |
99 | 99 | ||
100 | lba |= ((u64)cdb[2] << 56); | 100 | lba |= ((u64)cdb[2] << 56); |
@@ -125,7 +125,7 @@ scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) | |||
125 | static const char * | 125 | static const char * |
126 | scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) | 126 | scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) |
127 | { | 127 | { |
128 | const char *ret = p->buffer + p->len, *cmd; | 128 | const char *ret = trace_seq_buffer_ptr(p), *cmd; |
129 | sector_t lba = 0, txlen = 0; | 129 | sector_t lba = 0, txlen = 0; |
130 | u32 ei_lbrt = 0; | 130 | u32 ei_lbrt = 0; |
131 | 131 | ||
@@ -180,7 +180,7 @@ out: | |||
180 | static const char * | 180 | static const char * |
181 | scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) | 181 | scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) |
182 | { | 182 | { |
183 | const char *ret = p->buffer + p->len; | 183 | const char *ret = trace_seq_buffer_ptr(p); |
184 | unsigned int regions = cdb[7] << 8 | cdb[8]; | 184 | unsigned int regions = cdb[7] << 8 | cdb[8]; |
185 | 185 | ||
186 | trace_seq_printf(p, "regions=%u", (regions - 8) / 16); | 186 | trace_seq_printf(p, "regions=%u", (regions - 8) / 16); |
@@ -192,7 +192,7 @@ scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) | |||
192 | static const char * | 192 | static const char * |
193 | scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) | 193 | scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) |
194 | { | 194 | { |
195 | const char *ret = p->buffer + p->len, *cmd; | 195 | const char *ret = trace_seq_buffer_ptr(p), *cmd; |
196 | sector_t lba = 0; | 196 | sector_t lba = 0; |
197 | u32 alloc_len = 0; | 197 | u32 alloc_len = 0; |
198 | 198 | ||
@@ -247,7 +247,7 @@ scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) | |||
247 | static const char * | 247 | static const char * |
248 | scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) | 248 | scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) |
249 | { | 249 | { |
250 | const char *ret = p->buffer + p->len; | 250 | const char *ret = trace_seq_buffer_ptr(p); |
251 | 251 | ||
252 | trace_seq_printf(p, "-"); | 252 | trace_seq_printf(p, "-"); |
253 | trace_seq_putc(p, 0); | 253 | trace_seq_putc(p, 0); |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 404a686a3644..6bb5e3f2a3b4 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -33,8 +33,7 @@ | |||
33 | * features, then it must call an indirect function that | 33 | * features, then it must call an indirect function that |
34 | * does. Or at least does enough to prevent any unwelcomed side effects. | 34 | * does. Or at least does enough to prevent any unwelcomed side effects. |
35 | */ | 35 | */ |
36 | #if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ | 36 | #if !ARCH_SUPPORTS_FTRACE_OPS |
37 | !ARCH_SUPPORTS_FTRACE_OPS | ||
38 | # define FTRACE_FORCE_LIST_FUNC 1 | 37 | # define FTRACE_FORCE_LIST_FUNC 1 |
39 | #else | 38 | #else |
40 | # define FTRACE_FORCE_LIST_FUNC 0 | 39 | # define FTRACE_FORCE_LIST_FUNC 0 |
@@ -118,17 +117,18 @@ struct ftrace_ops { | |||
118 | ftrace_func_t func; | 117 | ftrace_func_t func; |
119 | struct ftrace_ops *next; | 118 | struct ftrace_ops *next; |
120 | unsigned long flags; | 119 | unsigned long flags; |
121 | int __percpu *disabled; | ||
122 | void *private; | 120 | void *private; |
121 | int __percpu *disabled; | ||
123 | #ifdef CONFIG_DYNAMIC_FTRACE | 122 | #ifdef CONFIG_DYNAMIC_FTRACE |
123 | int nr_trampolines; | ||
124 | struct ftrace_hash *notrace_hash; | 124 | struct ftrace_hash *notrace_hash; |
125 | struct ftrace_hash *filter_hash; | 125 | struct ftrace_hash *filter_hash; |
126 | struct ftrace_hash *tramp_hash; | ||
126 | struct mutex regex_lock; | 127 | struct mutex regex_lock; |
128 | unsigned long trampoline; | ||
127 | #endif | 129 | #endif |
128 | }; | 130 | }; |
129 | 131 | ||
130 | extern int function_trace_stop; | ||
131 | |||
132 | /* | 132 | /* |
133 | * Type of the current tracing. | 133 | * Type of the current tracing. |
134 | */ | 134 | */ |
@@ -140,32 +140,6 @@ enum ftrace_tracing_type_t { | |||
140 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | 140 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ |
141 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | 141 | extern enum ftrace_tracing_type_t ftrace_tracing_type; |
142 | 142 | ||
143 | /** | ||
144 | * ftrace_stop - stop function tracer. | ||
145 | * | ||
146 | * A quick way to stop the function tracer. Note this an on off switch, | ||
147 | * it is not something that is recursive like preempt_disable. | ||
148 | * This does not disable the calling of mcount, it only stops the | ||
149 | * calling of functions from mcount. | ||
150 | */ | ||
151 | static inline void ftrace_stop(void) | ||
152 | { | ||
153 | function_trace_stop = 1; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * ftrace_start - start the function tracer. | ||
158 | * | ||
159 | * This function is the inverse of ftrace_stop. This does not enable | ||
160 | * the function tracing if the function tracer is disabled. This only | ||
161 | * sets the function tracer flag to continue calling the functions | ||
162 | * from mcount. | ||
163 | */ | ||
164 | static inline void ftrace_start(void) | ||
165 | { | ||
166 | function_trace_stop = 0; | ||
167 | } | ||
168 | |||
169 | /* | 143 | /* |
170 | * The ftrace_ops must be a static and should also | 144 | * The ftrace_ops must be a static and should also |
171 | * be read_mostly. These functions do modify read_mostly variables | 145 | * be read_mostly. These functions do modify read_mostly variables |
@@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void) | |||
242 | } | 216 | } |
243 | static inline void clear_ftrace_function(void) { } | 217 | static inline void clear_ftrace_function(void) { } |
244 | static inline void ftrace_kill(void) { } | 218 | static inline void ftrace_kill(void) { } |
245 | static inline void ftrace_stop(void) { } | ||
246 | static inline void ftrace_start(void) { } | ||
247 | #endif /* CONFIG_FUNCTION_TRACER */ | 219 | #endif /* CONFIG_FUNCTION_TRACER */ |
248 | 220 | ||
249 | #ifdef CONFIG_STACK_TRACER | 221 | #ifdef CONFIG_STACK_TRACER |
@@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void); | |||
317 | * from tracing that function. | 289 | * from tracing that function. |
318 | */ | 290 | */ |
319 | enum { | 291 | enum { |
320 | FTRACE_FL_ENABLED = (1UL << 29), | 292 | FTRACE_FL_ENABLED = (1UL << 31), |
321 | FTRACE_FL_REGS = (1UL << 30), | 293 | FTRACE_FL_REGS = (1UL << 30), |
322 | FTRACE_FL_REGS_EN = (1UL << 31) | 294 | FTRACE_FL_REGS_EN = (1UL << 29), |
295 | FTRACE_FL_TRAMP = (1UL << 28), | ||
296 | FTRACE_FL_TRAMP_EN = (1UL << 27), | ||
323 | }; | 297 | }; |
324 | 298 | ||
325 | #define FTRACE_FL_MASK (0x7UL << 29) | 299 | #define FTRACE_REF_MAX_SHIFT 27 |
326 | #define FTRACE_REF_MAX ((1UL << 29) - 1) | 300 | #define FTRACE_FL_BITS 5 |
301 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) | ||
302 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | ||
303 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | ||
304 | |||
305 | #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) | ||
327 | 306 | ||
328 | struct dyn_ftrace { | 307 | struct dyn_ftrace { |
329 | unsigned long ip; /* address of mcount call-site */ | 308 | unsigned long ip; /* address of mcount call-site */ |
@@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command); | |||
431 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | 410 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) |
432 | #endif | 411 | #endif |
433 | 412 | ||
413 | #ifndef FTRACE_GRAPH_ADDR | ||
414 | #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) | ||
415 | #endif | ||
416 | |||
434 | #ifndef FTRACE_REGS_ADDR | 417 | #ifndef FTRACE_REGS_ADDR |
435 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | 418 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
436 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) | 419 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) |
@@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command); | |||
439 | #endif | 422 | #endif |
440 | #endif | 423 | #endif |
441 | 424 | ||
425 | /* | ||
426 | * If an arch would like functions that are only traced | ||
427 | * by the function graph tracer to jump directly to its own | ||
428 | * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR | ||
429 | * to be that address to jump to. | ||
430 | */ | ||
431 | #ifndef FTRACE_GRAPH_TRAMP_ADDR | ||
432 | #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) | ||
433 | #endif | ||
434 | |||
442 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 435 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
443 | extern void ftrace_graph_caller(void); | 436 | extern void ftrace_graph_caller(void); |
444 | extern int ftrace_enable_ftrace_graph_caller(void); | 437 | extern int ftrace_enable_ftrace_graph_caller(void); |
@@ -736,6 +729,7 @@ extern char __irqentry_text_end[]; | |||
736 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 729 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
737 | trace_func_graph_ent_t entryfunc); | 730 | trace_func_graph_ent_t entryfunc); |
738 | 731 | ||
732 | extern bool ftrace_graph_is_dead(void); | ||
739 | extern void ftrace_graph_stop(void); | 733 | extern void ftrace_graph_stop(void); |
740 | 734 | ||
741 | /* The current handlers in use */ | 735 | /* The current handlers in use */ |
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 136116924d8d..ea6c9dea79e3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
@@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s) | |||
25 | s->full = 0; | 25 | s->full = 0; |
26 | } | 26 | } |
27 | 27 | ||
28 | /** | ||
29 | * trace_seq_buffer_ptr - return pointer to next location in buffer | ||
30 | * @s: trace sequence descriptor | ||
31 | * | ||
32 | * Returns the pointer to the buffer where the next write to | ||
33 | * the buffer will happen. This is useful to save the location | ||
34 | * that is about to be written to and then return the result | ||
35 | * of that write. | ||
36 | */ | ||
37 | static inline unsigned char * | ||
38 | trace_seq_buffer_ptr(struct trace_seq *s) | ||
39 | { | ||
40 | return s->buffer + s->len; | ||
41 | } | ||
42 | |||
28 | /* | 43 | /* |
29 | * Currently only defined when tracing is enabled. | 44 | * Currently only defined when tracing is enabled. |
30 | */ | 45 | */ |
@@ -36,14 +51,13 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); | |||
36 | extern int | 51 | extern int |
37 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 52 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
38 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); | 53 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
39 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 54 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
40 | size_t cnt); | 55 | int cnt); |
41 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 56 | extern int trace_seq_puts(struct trace_seq *s, const char *str); |
42 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | 57 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); |
43 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); | 58 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); |
44 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 59 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
45 | size_t len); | 60 | unsigned int len); |
46 | extern void *trace_seq_reserve(struct trace_seq *s, size_t len); | ||
47 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); | 61 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); |
48 | 62 | ||
49 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 63 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
@@ -71,8 +85,8 @@ static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) | |||
71 | { | 85 | { |
72 | return 0; | 86 | return 0; |
73 | } | 87 | } |
74 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 88 | static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
75 | size_t cnt) | 89 | int cnt) |
76 | { | 90 | { |
77 | return 0; | 91 | return 0; |
78 | } | 92 | } |
@@ -85,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | |||
85 | return 0; | 99 | return 0; |
86 | } | 100 | } |
87 | static inline int | 101 | static inline int |
88 | trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | 102 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
89 | { | 103 | { |
90 | return 0; | 104 | return 0; |
91 | } | 105 | } |
92 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 106 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
93 | size_t len) | 107 | unsigned int len) |
94 | { | 108 | { |
95 | return 0; | 109 | return 0; |
96 | } | 110 | } |
97 | static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
98 | { | ||
99 | return NULL; | ||
100 | } | ||
101 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) | 111 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) |
102 | { | 112 | { |
103 | return 0; | 113 | return 0; |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index fcc2611d3f14..a9dfa79b6bab 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -371,7 +371,6 @@ int hibernation_snapshot(int platform_mode) | |||
371 | } | 371 | } |
372 | 372 | ||
373 | suspend_console(); | 373 | suspend_console(); |
374 | ftrace_stop(); | ||
375 | pm_restrict_gfp_mask(); | 374 | pm_restrict_gfp_mask(); |
376 | 375 | ||
377 | error = dpm_suspend(PMSG_FREEZE); | 376 | error = dpm_suspend(PMSG_FREEZE); |
@@ -397,7 +396,6 @@ int hibernation_snapshot(int platform_mode) | |||
397 | if (error || !in_suspend) | 396 | if (error || !in_suspend) |
398 | pm_restore_gfp_mask(); | 397 | pm_restore_gfp_mask(); |
399 | 398 | ||
400 | ftrace_start(); | ||
401 | resume_console(); | 399 | resume_console(); |
402 | dpm_complete(msg); | 400 | dpm_complete(msg); |
403 | 401 | ||
@@ -500,7 +498,6 @@ int hibernation_restore(int platform_mode) | |||
500 | 498 | ||
501 | pm_prepare_console(); | 499 | pm_prepare_console(); |
502 | suspend_console(); | 500 | suspend_console(); |
503 | ftrace_stop(); | ||
504 | pm_restrict_gfp_mask(); | 501 | pm_restrict_gfp_mask(); |
505 | error = dpm_suspend_start(PMSG_QUIESCE); | 502 | error = dpm_suspend_start(PMSG_QUIESCE); |
506 | if (!error) { | 503 | if (!error) { |
@@ -508,7 +505,6 @@ int hibernation_restore(int platform_mode) | |||
508 | dpm_resume_end(PMSG_RECOVER); | 505 | dpm_resume_end(PMSG_RECOVER); |
509 | } | 506 | } |
510 | pm_restore_gfp_mask(); | 507 | pm_restore_gfp_mask(); |
511 | ftrace_start(); | ||
512 | resume_console(); | 508 | resume_console(); |
513 | pm_restore_console(); | 509 | pm_restore_console(); |
514 | return error; | 510 | return error; |
@@ -535,7 +531,6 @@ int hibernation_platform_enter(void) | |||
535 | 531 | ||
536 | entering_platform_hibernation = true; | 532 | entering_platform_hibernation = true; |
537 | suspend_console(); | 533 | suspend_console(); |
538 | ftrace_stop(); | ||
539 | error = dpm_suspend_start(PMSG_HIBERNATE); | 534 | error = dpm_suspend_start(PMSG_HIBERNATE); |
540 | if (error) { | 535 | if (error) { |
541 | if (hibernation_ops->recover) | 536 | if (hibernation_ops->recover) |
@@ -579,7 +574,6 @@ int hibernation_platform_enter(void) | |||
579 | Resume_devices: | 574 | Resume_devices: |
580 | entering_platform_hibernation = false; | 575 | entering_platform_hibernation = false; |
581 | dpm_resume_end(PMSG_RESTORE); | 576 | dpm_resume_end(PMSG_RESTORE); |
582 | ftrace_start(); | ||
583 | resume_console(); | 577 | resume_console(); |
584 | 578 | ||
585 | Close: | 579 | Close: |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index ed35a4790afe..4b736b4dfa96 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -248,7 +248,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
248 | goto Platform_wake; | 248 | goto Platform_wake; |
249 | } | 249 | } |
250 | 250 | ||
251 | ftrace_stop(); | ||
252 | error = disable_nonboot_cpus(); | 251 | error = disable_nonboot_cpus(); |
253 | if (error || suspend_test(TEST_CPUS)) | 252 | if (error || suspend_test(TEST_CPUS)) |
254 | goto Enable_cpus; | 253 | goto Enable_cpus; |
@@ -275,7 +274,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
275 | 274 | ||
276 | Enable_cpus: | 275 | Enable_cpus: |
277 | enable_nonboot_cpus(); | 276 | enable_nonboot_cpus(); |
278 | ftrace_start(); | ||
279 | 277 | ||
280 | Platform_wake: | 278 | Platform_wake: |
281 | if (need_suspend_ops(state) && suspend_ops->wake) | 279 | if (need_suspend_ops(state) && suspend_ops->wake) |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d4409356f40d..a5da09c899dd 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -29,11 +29,6 @@ config HAVE_FUNCTION_GRAPH_FP_TEST | |||
29 | help | 29 | help |
30 | See Documentation/trace/ftrace-design.txt | 30 | See Documentation/trace/ftrace-design.txt |
31 | 31 | ||
32 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
33 | bool | ||
34 | help | ||
35 | See Documentation/trace/ftrace-design.txt | ||
36 | |||
37 | config HAVE_DYNAMIC_FTRACE | 32 | config HAVE_DYNAMIC_FTRACE |
38 | bool | 33 | bool |
39 | help | 34 | help |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 2611613f14f1..67d6369ddf83 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -28,6 +28,7 @@ obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o | |||
28 | 28 | ||
29 | obj-$(CONFIG_TRACING) += trace.o | 29 | obj-$(CONFIG_TRACING) += trace.o |
30 | obj-$(CONFIG_TRACING) += trace_output.o | 30 | obj-$(CONFIG_TRACING) += trace_output.o |
31 | obj-$(CONFIG_TRACING) += trace_seq.o | ||
31 | obj-$(CONFIG_TRACING) += trace_stat.o | 32 | obj-$(CONFIG_TRACING) += trace_stat.o |
32 | obj-$(CONFIG_TRACING) += trace_printk.o | 33 | obj-$(CONFIG_TRACING) += trace_printk.o |
33 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | 34 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ac9d1dad630b..1654b12c891a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -80,9 +80,6 @@ static struct ftrace_ops ftrace_list_end __read_mostly = { | |||
80 | int ftrace_enabled __read_mostly; | 80 | int ftrace_enabled __read_mostly; |
81 | static int last_ftrace_enabled; | 81 | static int last_ftrace_enabled; |
82 | 82 | ||
83 | /* Quick disabling of function tracer. */ | ||
84 | int function_trace_stop __read_mostly; | ||
85 | |||
86 | /* Current function tracing op */ | 83 | /* Current function tracing op */ |
87 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; | 84 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; |
88 | /* What to set function_trace_op to */ | 85 | /* What to set function_trace_op to */ |
@@ -1042,6 +1039,8 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid; | |||
1042 | 1039 | ||
1043 | #ifdef CONFIG_DYNAMIC_FTRACE | 1040 | #ifdef CONFIG_DYNAMIC_FTRACE |
1044 | 1041 | ||
1042 | static struct ftrace_ops *removed_ops; | ||
1043 | |||
1045 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 1044 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
1046 | # error Dynamic ftrace depends on MCOUNT_RECORD | 1045 | # error Dynamic ftrace depends on MCOUNT_RECORD |
1047 | #endif | 1046 | #endif |
@@ -1304,25 +1303,15 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1304 | struct ftrace_hash *new_hash; | 1303 | struct ftrace_hash *new_hash; |
1305 | int size = src->count; | 1304 | int size = src->count; |
1306 | int bits = 0; | 1305 | int bits = 0; |
1307 | int ret; | ||
1308 | int i; | 1306 | int i; |
1309 | 1307 | ||
1310 | /* | 1308 | /* |
1311 | * Remove the current set, update the hash and add | ||
1312 | * them back. | ||
1313 | */ | ||
1314 | ftrace_hash_rec_disable(ops, enable); | ||
1315 | |||
1316 | /* | ||
1317 | * If the new source is empty, just free dst and assign it | 1309 | * If the new source is empty, just free dst and assign it |
1318 | * the empty_hash. | 1310 | * the empty_hash. |
1319 | */ | 1311 | */ |
1320 | if (!src->count) { | 1312 | if (!src->count) { |
1321 | free_ftrace_hash_rcu(*dst); | 1313 | new_hash = EMPTY_HASH; |
1322 | rcu_assign_pointer(*dst, EMPTY_HASH); | 1314 | goto update; |
1323 | /* still need to update the function records */ | ||
1324 | ret = 0; | ||
1325 | goto out; | ||
1326 | } | 1315 | } |
1327 | 1316 | ||
1328 | /* | 1317 | /* |
@@ -1335,10 +1324,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1335 | if (bits > FTRACE_HASH_MAX_BITS) | 1324 | if (bits > FTRACE_HASH_MAX_BITS) |
1336 | bits = FTRACE_HASH_MAX_BITS; | 1325 | bits = FTRACE_HASH_MAX_BITS; |
1337 | 1326 | ||
1338 | ret = -ENOMEM; | ||
1339 | new_hash = alloc_ftrace_hash(bits); | 1327 | new_hash = alloc_ftrace_hash(bits); |
1340 | if (!new_hash) | 1328 | if (!new_hash) |
1341 | goto out; | 1329 | return -ENOMEM; |
1342 | 1330 | ||
1343 | size = 1 << src->size_bits; | 1331 | size = 1 << src->size_bits; |
1344 | for (i = 0; i < size; i++) { | 1332 | for (i = 0; i < size; i++) { |
@@ -1349,20 +1337,20 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1349 | } | 1337 | } |
1350 | } | 1338 | } |
1351 | 1339 | ||
1340 | update: | ||
1341 | /* | ||
1342 | * Remove the current set, update the hash and add | ||
1343 | * them back. | ||
1344 | */ | ||
1345 | ftrace_hash_rec_disable(ops, enable); | ||
1346 | |||
1352 | old_hash = *dst; | 1347 | old_hash = *dst; |
1353 | rcu_assign_pointer(*dst, new_hash); | 1348 | rcu_assign_pointer(*dst, new_hash); |
1354 | free_ftrace_hash_rcu(old_hash); | 1349 | free_ftrace_hash_rcu(old_hash); |
1355 | 1350 | ||
1356 | ret = 0; | ||
1357 | out: | ||
1358 | /* | ||
1359 | * Enable regardless of ret: | ||
1360 | * On success, we enable the new hash. | ||
1361 | * On failure, we re-enable the original hash. | ||
1362 | */ | ||
1363 | ftrace_hash_rec_enable(ops, enable); | 1351 | ftrace_hash_rec_enable(ops, enable); |
1364 | 1352 | ||
1365 | return ret; | 1353 | return 0; |
1366 | } | 1354 | } |
1367 | 1355 | ||
1368 | /* | 1356 | /* |
@@ -1492,6 +1480,53 @@ int ftrace_text_reserved(const void *start, const void *end) | |||
1492 | return (int)!!ret; | 1480 | return (int)!!ret; |
1493 | } | 1481 | } |
1494 | 1482 | ||
1483 | /* Test if ops registered to this rec needs regs */ | ||
1484 | static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | ||
1485 | { | ||
1486 | struct ftrace_ops *ops; | ||
1487 | bool keep_regs = false; | ||
1488 | |||
1489 | for (ops = ftrace_ops_list; | ||
1490 | ops != &ftrace_list_end; ops = ops->next) { | ||
1491 | /* pass rec in as regs to have non-NULL val */ | ||
1492 | if (ftrace_ops_test(ops, rec->ip, rec)) { | ||
1493 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | ||
1494 | keep_regs = true; | ||
1495 | break; | ||
1496 | } | ||
1497 | } | ||
1498 | } | ||
1499 | |||
1500 | return keep_regs; | ||
1501 | } | ||
1502 | |||
1503 | static void ftrace_remove_tramp(struct ftrace_ops *ops, | ||
1504 | struct dyn_ftrace *rec) | ||
1505 | { | ||
1506 | struct ftrace_func_entry *entry; | ||
1507 | |||
1508 | entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip); | ||
1509 | if (!entry) | ||
1510 | return; | ||
1511 | |||
1512 | /* | ||
1513 | * The tramp_hash entry will be removed at time | ||
1514 | * of update. | ||
1515 | */ | ||
1516 | ops->nr_trampolines--; | ||
1517 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
1518 | } | ||
1519 | |||
1520 | static void ftrace_clear_tramps(struct dyn_ftrace *rec) | ||
1521 | { | ||
1522 | struct ftrace_ops *op; | ||
1523 | |||
1524 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
1525 | if (op->nr_trampolines) | ||
1526 | ftrace_remove_tramp(op, rec); | ||
1527 | } while_for_each_ftrace_op(op); | ||
1528 | } | ||
1529 | |||
1495 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | 1530 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, |
1496 | int filter_hash, | 1531 | int filter_hash, |
1497 | bool inc) | 1532 | bool inc) |
@@ -1572,8 +1607,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
1572 | 1607 | ||
1573 | if (inc) { | 1608 | if (inc) { |
1574 | rec->flags++; | 1609 | rec->flags++; |
1575 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | 1610 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
1576 | return; | 1611 | return; |
1612 | |||
1613 | /* | ||
1614 | * If there's only a single callback registered to a | ||
1615 | * function, and the ops has a trampoline registered | ||
1616 | * for it, then we can call it directly. | ||
1617 | */ | ||
1618 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) { | ||
1619 | rec->flags |= FTRACE_FL_TRAMP; | ||
1620 | ops->nr_trampolines++; | ||
1621 | } else { | ||
1622 | /* | ||
1623 | * If we are adding another function callback | ||
1624 | * to this function, and the previous had a | ||
1625 | * trampoline used, then we need to go back to | ||
1626 | * the default trampoline. | ||
1627 | */ | ||
1628 | rec->flags &= ~FTRACE_FL_TRAMP; | ||
1629 | |||
1630 | /* remove trampolines from any ops for this rec */ | ||
1631 | ftrace_clear_tramps(rec); | ||
1632 | } | ||
1633 | |||
1577 | /* | 1634 | /* |
1578 | * If any ops wants regs saved for this function | 1635 | * If any ops wants regs saved for this function |
1579 | * then all ops will get saved regs. | 1636 | * then all ops will get saved regs. |
@@ -1581,9 +1638,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
1581 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | 1638 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
1582 | rec->flags |= FTRACE_FL_REGS; | 1639 | rec->flags |= FTRACE_FL_REGS; |
1583 | } else { | 1640 | } else { |
1584 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | 1641 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
1585 | return; | 1642 | return; |
1586 | rec->flags--; | 1643 | rec->flags--; |
1644 | |||
1645 | if (ops->trampoline && !ftrace_rec_count(rec)) | ||
1646 | ftrace_remove_tramp(ops, rec); | ||
1647 | |||
1648 | /* | ||
1649 | * If the rec had REGS enabled and the ops that is | ||
1650 | * being removed had REGS set, then see if there is | ||
1651 | * still any ops for this record that wants regs. | ||
1652 | * If not, we can stop recording them. | ||
1653 | */ | ||
1654 | if (ftrace_rec_count(rec) > 0 && | ||
1655 | rec->flags & FTRACE_FL_REGS && | ||
1656 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | ||
1657 | if (!test_rec_ops_needs_regs(rec)) | ||
1658 | rec->flags &= ~FTRACE_FL_REGS; | ||
1659 | } | ||
1660 | |||
1661 | /* | ||
1662 | * flags will be cleared in ftrace_check_record() | ||
1663 | * if rec count is zero. | ||
1664 | */ | ||
1587 | } | 1665 | } |
1588 | count++; | 1666 | count++; |
1589 | /* Shortcut, if we handled all records, we are done. */ | 1667 | /* Shortcut, if we handled all records, we are done. */ |
@@ -1668,17 +1746,23 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
1668 | * If we are disabling calls, then disable all records that | 1746 | * If we are disabling calls, then disable all records that |
1669 | * are enabled. | 1747 | * are enabled. |
1670 | */ | 1748 | */ |
1671 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) | 1749 | if (enable && ftrace_rec_count(rec)) |
1672 | flag = FTRACE_FL_ENABLED; | 1750 | flag = FTRACE_FL_ENABLED; |
1673 | 1751 | ||
1674 | /* | 1752 | /* |
1675 | * If enabling and the REGS flag does not match the REGS_EN, then | 1753 | * If enabling and the REGS flag does not match the REGS_EN, or |
1676 | * do not ignore this record. Set flags to fail the compare against | 1754 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore |
1677 | * ENABLED. | 1755 | * this record. Set flags to fail the compare against ENABLED. |
1678 | */ | 1756 | */ |
1679 | if (flag && | 1757 | if (flag) { |
1680 | (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN))) | 1758 | if (!(rec->flags & FTRACE_FL_REGS) != |
1681 | flag |= FTRACE_FL_REGS; | 1759 | !(rec->flags & FTRACE_FL_REGS_EN)) |
1760 | flag |= FTRACE_FL_REGS; | ||
1761 | |||
1762 | if (!(rec->flags & FTRACE_FL_TRAMP) != | ||
1763 | !(rec->flags & FTRACE_FL_TRAMP_EN)) | ||
1764 | flag |= FTRACE_FL_TRAMP; | ||
1765 | } | ||
1682 | 1766 | ||
1683 | /* If the state of this record hasn't changed, then do nothing */ | 1767 | /* If the state of this record hasn't changed, then do nothing */ |
1684 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | 1768 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
@@ -1696,6 +1780,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
1696 | else | 1780 | else |
1697 | rec->flags &= ~FTRACE_FL_REGS_EN; | 1781 | rec->flags &= ~FTRACE_FL_REGS_EN; |
1698 | } | 1782 | } |
1783 | if (flag & FTRACE_FL_TRAMP) { | ||
1784 | if (rec->flags & FTRACE_FL_TRAMP) | ||
1785 | rec->flags |= FTRACE_FL_TRAMP_EN; | ||
1786 | else | ||
1787 | rec->flags &= ~FTRACE_FL_TRAMP_EN; | ||
1788 | } | ||
1699 | } | 1789 | } |
1700 | 1790 | ||
1701 | /* | 1791 | /* |
@@ -1704,7 +1794,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
1704 | * Otherwise, | 1794 | * Otherwise, |
1705 | * return UPDATE_MODIFY_CALL to tell the caller to convert | 1795 | * return UPDATE_MODIFY_CALL to tell the caller to convert |
1706 | * from the save regs, to a non-save regs function or | 1796 | * from the save regs, to a non-save regs function or |
1707 | * vice versa. | 1797 | * vice versa, or from a trampoline call. |
1708 | */ | 1798 | */ |
1709 | if (flag & FTRACE_FL_ENABLED) | 1799 | if (flag & FTRACE_FL_ENABLED) |
1710 | return FTRACE_UPDATE_MAKE_CALL; | 1800 | return FTRACE_UPDATE_MAKE_CALL; |
@@ -1714,7 +1804,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
1714 | 1804 | ||
1715 | if (update) { | 1805 | if (update) { |
1716 | /* If there's no more users, clear all flags */ | 1806 | /* If there's no more users, clear all flags */ |
1717 | if (!(rec->flags & ~FTRACE_FL_MASK)) | 1807 | if (!ftrace_rec_count(rec)) |
1718 | rec->flags = 0; | 1808 | rec->flags = 0; |
1719 | else | 1809 | else |
1720 | /* Just disable the record (keep REGS state) */ | 1810 | /* Just disable the record (keep REGS state) */ |
@@ -1751,6 +1841,43 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable) | |||
1751 | return ftrace_check_record(rec, enable, 0); | 1841 | return ftrace_check_record(rec, enable, 0); |
1752 | } | 1842 | } |
1753 | 1843 | ||
1844 | static struct ftrace_ops * | ||
1845 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) | ||
1846 | { | ||
1847 | struct ftrace_ops *op; | ||
1848 | |||
1849 | /* Removed ops need to be tested first */ | ||
1850 | if (removed_ops && removed_ops->tramp_hash) { | ||
1851 | if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip)) | ||
1852 | return removed_ops; | ||
1853 | } | ||
1854 | |||
1855 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
1856 | if (!op->tramp_hash) | ||
1857 | continue; | ||
1858 | |||
1859 | if (ftrace_lookup_ip(op->tramp_hash, rec->ip)) | ||
1860 | return op; | ||
1861 | |||
1862 | } while_for_each_ftrace_op(op); | ||
1863 | |||
1864 | return NULL; | ||
1865 | } | ||
1866 | |||
1867 | static struct ftrace_ops * | ||
1868 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) | ||
1869 | { | ||
1870 | struct ftrace_ops *op; | ||
1871 | |||
1872 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
1873 | /* pass rec in as regs to have non-NULL val */ | ||
1874 | if (ftrace_ops_test(op, rec->ip, rec)) | ||
1875 | return op; | ||
1876 | } while_for_each_ftrace_op(op); | ||
1877 | |||
1878 | return NULL; | ||
1879 | } | ||
1880 | |||
1754 | /** | 1881 | /** |
1755 | * ftrace_get_addr_new - Get the call address to set to | 1882 | * ftrace_get_addr_new - Get the call address to set to |
1756 | * @rec: The ftrace record descriptor | 1883 | * @rec: The ftrace record descriptor |
@@ -1763,6 +1890,20 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable) | |||
1763 | */ | 1890 | */ |
1764 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | 1891 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) |
1765 | { | 1892 | { |
1893 | struct ftrace_ops *ops; | ||
1894 | |||
1895 | /* Trampolines take precedence over regs */ | ||
1896 | if (rec->flags & FTRACE_FL_TRAMP) { | ||
1897 | ops = ftrace_find_tramp_ops_new(rec); | ||
1898 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | ||
1899 | pr_warning("Bad trampoline accounting at: %p (%pS)\n", | ||
1900 | (void *)rec->ip, (void *)rec->ip); | ||
1901 | /* Ftrace is shutting down, return anything */ | ||
1902 | return (unsigned long)FTRACE_ADDR; | ||
1903 | } | ||
1904 | return ops->trampoline; | ||
1905 | } | ||
1906 | |||
1766 | if (rec->flags & FTRACE_FL_REGS) | 1907 | if (rec->flags & FTRACE_FL_REGS) |
1767 | return (unsigned long)FTRACE_REGS_ADDR; | 1908 | return (unsigned long)FTRACE_REGS_ADDR; |
1768 | else | 1909 | else |
@@ -1781,6 +1922,20 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |||
1781 | */ | 1922 | */ |
1782 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) | 1923 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) |
1783 | { | 1924 | { |
1925 | struct ftrace_ops *ops; | ||
1926 | |||
1927 | /* Trampolines take precedence over regs */ | ||
1928 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | ||
1929 | ops = ftrace_find_tramp_ops_curr(rec); | ||
1930 | if (FTRACE_WARN_ON(!ops)) { | ||
1931 | pr_warning("Bad trampoline accounting at: %p (%pS)\n", | ||
1932 | (void *)rec->ip, (void *)rec->ip); | ||
1933 | /* Ftrace is shutting down, return anything */ | ||
1934 | return (unsigned long)FTRACE_ADDR; | ||
1935 | } | ||
1936 | return ops->trampoline; | ||
1937 | } | ||
1938 | |||
1784 | if (rec->flags & FTRACE_FL_REGS_EN) | 1939 | if (rec->flags & FTRACE_FL_REGS_EN) |
1785 | return (unsigned long)FTRACE_REGS_ADDR; | 1940 | return (unsigned long)FTRACE_REGS_ADDR; |
1786 | else | 1941 | else |
@@ -2023,6 +2178,89 @@ void __weak arch_ftrace_update_code(int command) | |||
2023 | ftrace_run_stop_machine(command); | 2178 | ftrace_run_stop_machine(command); |
2024 | } | 2179 | } |
2025 | 2180 | ||
2181 | static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops) | ||
2182 | { | ||
2183 | struct ftrace_page *pg; | ||
2184 | struct dyn_ftrace *rec; | ||
2185 | int size, bits; | ||
2186 | int ret; | ||
2187 | |||
2188 | size = ops->nr_trampolines; | ||
2189 | bits = 0; | ||
2190 | /* | ||
2191 | * Make the hash size about 1/2 the # found | ||
2192 | */ | ||
2193 | for (size /= 2; size; size >>= 1) | ||
2194 | bits++; | ||
2195 | |||
2196 | ops->tramp_hash = alloc_ftrace_hash(bits); | ||
2197 | /* | ||
2198 | * TODO: a failed allocation is going to screw up | ||
2199 | * the accounting of what needs to be modified | ||
2200 | * and not. For now, we kill ftrace if we fail | ||
2201 | * to allocate here. But there are ways around this, | ||
2202 | * but that will take a little more work. | ||
2203 | */ | ||
2204 | if (!ops->tramp_hash) | ||
2205 | return -ENOMEM; | ||
2206 | |||
2207 | do_for_each_ftrace_rec(pg, rec) { | ||
2208 | if (ftrace_rec_count(rec) == 1 && | ||
2209 | ftrace_ops_test(ops, rec->ip, rec)) { | ||
2210 | |||
2211 | /* | ||
2212 | * If another ops adds to a rec, the rec will | ||
2213 | * lose its trampoline and never get it back | ||
2214 | * until all ops are off of it. | ||
2215 | */ | ||
2216 | if (!(rec->flags & FTRACE_FL_TRAMP)) | ||
2217 | continue; | ||
2218 | |||
2219 | /* This record had better have a trampoline */ | ||
2220 | if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN))) | ||
2221 | return -1; | ||
2222 | |||
2223 | ret = add_hash_entry(ops->tramp_hash, rec->ip); | ||
2224 | if (ret < 0) | ||
2225 | return ret; | ||
2226 | } | ||
2227 | } while_for_each_ftrace_rec(); | ||
2228 | |||
2229 | /* The number of recs in the hash must match nr_trampolines */ | ||
2230 | FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines); | ||
2231 | |||
2232 | return 0; | ||
2233 | } | ||
2234 | |||
2235 | static int ftrace_save_tramp_hashes(void) | ||
2236 | { | ||
2237 | struct ftrace_ops *op; | ||
2238 | int ret; | ||
2239 | |||
2240 | /* | ||
2241 | * Now that any trampoline is being used, we need to save the | ||
2242 | * hashes for the ops that have them. This allows the mapping | ||
2243 | * back from the record to the ops that has the trampoline to | ||
2244 | * know what code is being replaced. Modifying code must always | ||
2245 | * verify what it is changing. | ||
2246 | */ | ||
2247 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
2248 | |||
2249 | /* The tramp_hash is recreated each time. */ | ||
2250 | free_ftrace_hash(op->tramp_hash); | ||
2251 | op->tramp_hash = NULL; | ||
2252 | |||
2253 | if (op->nr_trampolines) { | ||
2254 | ret = ftrace_save_ops_tramp_hash(op); | ||
2255 | if (ret) | ||
2256 | return ret; | ||
2257 | } | ||
2258 | |||
2259 | } while_for_each_ftrace_op(op); | ||
2260 | |||
2261 | return 0; | ||
2262 | } | ||
2263 | |||
2026 | static void ftrace_run_update_code(int command) | 2264 | static void ftrace_run_update_code(int command) |
2027 | { | 2265 | { |
2028 | int ret; | 2266 | int ret; |
@@ -2031,11 +2269,6 @@ static void ftrace_run_update_code(int command) | |||
2031 | FTRACE_WARN_ON(ret); | 2269 | FTRACE_WARN_ON(ret); |
2032 | if (ret) | 2270 | if (ret) |
2033 | return; | 2271 | return; |
2034 | /* | ||
2035 | * Do not call function tracer while we update the code. | ||
2036 | * We are in stop machine. | ||
2037 | */ | ||
2038 | function_trace_stop++; | ||
2039 | 2272 | ||
2040 | /* | 2273 | /* |
2041 | * By default we use stop_machine() to modify the code. | 2274 | * By default we use stop_machine() to modify the code. |
@@ -2045,15 +2278,15 @@ static void ftrace_run_update_code(int command) | |||
2045 | */ | 2278 | */ |
2046 | arch_ftrace_update_code(command); | 2279 | arch_ftrace_update_code(command); |
2047 | 2280 | ||
2048 | function_trace_stop--; | ||
2049 | |||
2050 | ret = ftrace_arch_code_modify_post_process(); | 2281 | ret = ftrace_arch_code_modify_post_process(); |
2051 | FTRACE_WARN_ON(ret); | 2282 | FTRACE_WARN_ON(ret); |
2283 | |||
2284 | ret = ftrace_save_tramp_hashes(); | ||
2285 | FTRACE_WARN_ON(ret); | ||
2052 | } | 2286 | } |
2053 | 2287 | ||
2054 | static ftrace_func_t saved_ftrace_func; | 2288 | static ftrace_func_t saved_ftrace_func; |
2055 | static int ftrace_start_up; | 2289 | static int ftrace_start_up; |
2056 | static int global_start_up; | ||
2057 | 2290 | ||
2058 | static void control_ops_free(struct ftrace_ops *ops) | 2291 | static void control_ops_free(struct ftrace_ops *ops) |
2059 | { | 2292 | { |
@@ -2117,8 +2350,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2117 | 2350 | ||
2118 | ftrace_hash_rec_disable(ops, 1); | 2351 | ftrace_hash_rec_disable(ops, 1); |
2119 | 2352 | ||
2120 | if (!global_start_up) | 2353 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
2121 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
2122 | 2354 | ||
2123 | command |= FTRACE_UPDATE_CALLS; | 2355 | command |= FTRACE_UPDATE_CALLS; |
2124 | 2356 | ||
@@ -2139,8 +2371,16 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2139 | return 0; | 2371 | return 0; |
2140 | } | 2372 | } |
2141 | 2373 | ||
2374 | /* | ||
2375 | * If the ops uses a trampoline, then it needs to be | ||
2376 | * tested first on update. | ||
2377 | */ | ||
2378 | removed_ops = ops; | ||
2379 | |||
2142 | ftrace_run_update_code(command); | 2380 | ftrace_run_update_code(command); |
2143 | 2381 | ||
2382 | removed_ops = NULL; | ||
2383 | |||
2144 | /* | 2384 | /* |
2145 | * Dynamic ops may be freed, we must make sure that all | 2385 | * Dynamic ops may be freed, we must make sure that all |
2146 | * callers are done before leaving this function. | 2386 | * callers are done before leaving this function. |
@@ -2398,7 +2638,8 @@ ftrace_allocate_pages(unsigned long num_to_init) | |||
2398 | return start_pg; | 2638 | return start_pg; |
2399 | 2639 | ||
2400 | free_pages: | 2640 | free_pages: |
2401 | while (start_pg) { | 2641 | pg = start_pg; |
2642 | while (pg) { | ||
2402 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | 2643 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
2403 | free_pages((unsigned long)pg->records, order); | 2644 | free_pages((unsigned long)pg->records, order); |
2404 | start_pg = pg->next; | 2645 | start_pg = pg->next; |
@@ -2595,8 +2836,10 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
2595 | * off, we can short cut and just print out that all | 2836 | * off, we can short cut and just print out that all |
2596 | * functions are enabled. | 2837 | * functions are enabled. |
2597 | */ | 2838 | */ |
2598 | if (iter->flags & FTRACE_ITER_FILTER && | 2839 | if ((iter->flags & FTRACE_ITER_FILTER && |
2599 | ftrace_hash_empty(ops->filter_hash)) { | 2840 | ftrace_hash_empty(ops->filter_hash)) || |
2841 | (iter->flags & FTRACE_ITER_NOTRACE && | ||
2842 | ftrace_hash_empty(ops->notrace_hash))) { | ||
2600 | if (*pos > 0) | 2843 | if (*pos > 0) |
2601 | return t_hash_start(m, pos); | 2844 | return t_hash_start(m, pos); |
2602 | iter->flags |= FTRACE_ITER_PRINTALL; | 2845 | iter->flags |= FTRACE_ITER_PRINTALL; |
@@ -2641,7 +2884,10 @@ static int t_show(struct seq_file *m, void *v) | |||
2641 | return t_hash_show(m, iter); | 2884 | return t_hash_show(m, iter); |
2642 | 2885 | ||
2643 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 2886 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
2644 | seq_printf(m, "#### all functions enabled ####\n"); | 2887 | if (iter->flags & FTRACE_ITER_NOTRACE) |
2888 | seq_printf(m, "#### no functions disabled ####\n"); | ||
2889 | else | ||
2890 | seq_printf(m, "#### all functions enabled ####\n"); | ||
2645 | return 0; | 2891 | return 0; |
2646 | } | 2892 | } |
2647 | 2893 | ||
@@ -2651,10 +2897,22 @@ static int t_show(struct seq_file *m, void *v) | |||
2651 | return 0; | 2897 | return 0; |
2652 | 2898 | ||
2653 | seq_printf(m, "%ps", (void *)rec->ip); | 2899 | seq_printf(m, "%ps", (void *)rec->ip); |
2654 | if (iter->flags & FTRACE_ITER_ENABLED) | 2900 | if (iter->flags & FTRACE_ITER_ENABLED) { |
2655 | seq_printf(m, " (%ld)%s", | 2901 | seq_printf(m, " (%ld)%s", |
2656 | rec->flags & ~FTRACE_FL_MASK, | 2902 | ftrace_rec_count(rec), |
2657 | rec->flags & FTRACE_FL_REGS ? " R" : ""); | 2903 | rec->flags & FTRACE_FL_REGS ? " R" : " "); |
2904 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | ||
2905 | struct ftrace_ops *ops; | ||
2906 | |||
2907 | ops = ftrace_find_tramp_ops_curr(rec); | ||
2908 | if (ops && ops->trampoline) | ||
2909 | seq_printf(m, "\ttramp: %pS", | ||
2910 | (void *)ops->trampoline); | ||
2911 | else | ||
2912 | seq_printf(m, "\ttramp: ERROR!"); | ||
2913 | } | ||
2914 | } | ||
2915 | |||
2658 | seq_printf(m, "\n"); | 2916 | seq_printf(m, "\n"); |
2659 | 2917 | ||
2660 | return 0; | 2918 | return 0; |
@@ -2702,13 +2960,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file) | |||
2702 | return iter ? 0 : -ENOMEM; | 2960 | return iter ? 0 : -ENOMEM; |
2703 | } | 2961 | } |
2704 | 2962 | ||
2705 | static void ftrace_filter_reset(struct ftrace_hash *hash) | ||
2706 | { | ||
2707 | mutex_lock(&ftrace_lock); | ||
2708 | ftrace_hash_clear(hash); | ||
2709 | mutex_unlock(&ftrace_lock); | ||
2710 | } | ||
2711 | |||
2712 | /** | 2963 | /** |
2713 | * ftrace_regex_open - initialize function tracer filter files | 2964 | * ftrace_regex_open - initialize function tracer filter files |
2714 | * @ops: The ftrace_ops that hold the hash filters | 2965 | * @ops: The ftrace_ops that hold the hash filters |
@@ -2758,7 +3009,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
2758 | hash = ops->filter_hash; | 3009 | hash = ops->filter_hash; |
2759 | 3010 | ||
2760 | if (file->f_mode & FMODE_WRITE) { | 3011 | if (file->f_mode & FMODE_WRITE) { |
2761 | iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); | 3012 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
3013 | |||
3014 | if (file->f_flags & O_TRUNC) | ||
3015 | iter->hash = alloc_ftrace_hash(size_bits); | ||
3016 | else | ||
3017 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); | ||
3018 | |||
2762 | if (!iter->hash) { | 3019 | if (!iter->hash) { |
2763 | trace_parser_put(&iter->parser); | 3020 | trace_parser_put(&iter->parser); |
2764 | kfree(iter); | 3021 | kfree(iter); |
@@ -2767,10 +3024,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
2767 | } | 3024 | } |
2768 | } | 3025 | } |
2769 | 3026 | ||
2770 | if ((file->f_mode & FMODE_WRITE) && | ||
2771 | (file->f_flags & O_TRUNC)) | ||
2772 | ftrace_filter_reset(iter->hash); | ||
2773 | |||
2774 | if (file->f_mode & FMODE_READ) { | 3027 | if (file->f_mode & FMODE_READ) { |
2775 | iter->pg = ftrace_pages_start; | 3028 | iter->pg = ftrace_pages_start; |
2776 | 3029 | ||
@@ -3471,14 +3724,16 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
3471 | else | 3724 | else |
3472 | orig_hash = &ops->notrace_hash; | 3725 | orig_hash = &ops->notrace_hash; |
3473 | 3726 | ||
3474 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3727 | if (reset) |
3728 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | ||
3729 | else | ||
3730 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | ||
3731 | |||
3475 | if (!hash) { | 3732 | if (!hash) { |
3476 | ret = -ENOMEM; | 3733 | ret = -ENOMEM; |
3477 | goto out_regex_unlock; | 3734 | goto out_regex_unlock; |
3478 | } | 3735 | } |
3479 | 3736 | ||
3480 | if (reset) | ||
3481 | ftrace_filter_reset(hash); | ||
3482 | if (buf && !ftrace_match_records(hash, buf, len)) { | 3737 | if (buf && !ftrace_match_records(hash, buf, len)) { |
3483 | ret = -EINVAL; | 3738 | ret = -EINVAL; |
3484 | goto out_regex_unlock; | 3739 | goto out_regex_unlock; |
@@ -3630,6 +3885,7 @@ __setup("ftrace_filter=", set_ftrace_filter); | |||
3630 | 3885 | ||
3631 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3886 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3632 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | 3887 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
3888 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | ||
3633 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); | 3889 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); |
3634 | 3890 | ||
3635 | static int __init set_graph_function(char *str) | 3891 | static int __init set_graph_function(char *str) |
@@ -3639,16 +3895,29 @@ static int __init set_graph_function(char *str) | |||
3639 | } | 3895 | } |
3640 | __setup("ftrace_graph_filter=", set_graph_function); | 3896 | __setup("ftrace_graph_filter=", set_graph_function); |
3641 | 3897 | ||
3642 | static void __init set_ftrace_early_graph(char *buf) | 3898 | static int __init set_graph_notrace_function(char *str) |
3899 | { | ||
3900 | strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); | ||
3901 | return 1; | ||
3902 | } | ||
3903 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); | ||
3904 | |||
3905 | static void __init set_ftrace_early_graph(char *buf, int enable) | ||
3643 | { | 3906 | { |
3644 | int ret; | 3907 | int ret; |
3645 | char *func; | 3908 | char *func; |
3909 | unsigned long *table = ftrace_graph_funcs; | ||
3910 | int *count = &ftrace_graph_count; | ||
3911 | |||
3912 | if (!enable) { | ||
3913 | table = ftrace_graph_notrace_funcs; | ||
3914 | count = &ftrace_graph_notrace_count; | ||
3915 | } | ||
3646 | 3916 | ||
3647 | while (buf) { | 3917 | while (buf) { |
3648 | func = strsep(&buf, ","); | 3918 | func = strsep(&buf, ","); |
3649 | /* we allow only one expression at a time */ | 3919 | /* we allow only one expression at a time */ |
3650 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, | 3920 | ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func); |
3651 | FTRACE_GRAPH_MAX_FUNCS, func); | ||
3652 | if (ret) | 3921 | if (ret) |
3653 | printk(KERN_DEBUG "ftrace: function %s not " | 3922 | printk(KERN_DEBUG "ftrace: function %s not " |
3654 | "traceable\n", func); | 3923 | "traceable\n", func); |
@@ -3677,7 +3946,9 @@ static void __init set_ftrace_early_filters(void) | |||
3677 | ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); | 3946 | ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); |
3678 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3947 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3679 | if (ftrace_graph_buf[0]) | 3948 | if (ftrace_graph_buf[0]) |
3680 | set_ftrace_early_graph(ftrace_graph_buf); | 3949 | set_ftrace_early_graph(ftrace_graph_buf, 1); |
3950 | if (ftrace_graph_notrace_buf[0]) | ||
3951 | set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); | ||
3681 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 3952 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
3682 | } | 3953 | } |
3683 | 3954 | ||
@@ -3819,7 +4090,12 @@ static int g_show(struct seq_file *m, void *v) | |||
3819 | return 0; | 4090 | return 0; |
3820 | 4091 | ||
3821 | if (ptr == (unsigned long *)1) { | 4092 | if (ptr == (unsigned long *)1) { |
3822 | seq_printf(m, "#### all functions enabled ####\n"); | 4093 | struct ftrace_graph_data *fgd = m->private; |
4094 | |||
4095 | if (fgd->table == ftrace_graph_funcs) | ||
4096 | seq_printf(m, "#### all functions enabled ####\n"); | ||
4097 | else | ||
4098 | seq_printf(m, "#### no functions disabled ####\n"); | ||
3823 | return 0; | 4099 | return 0; |
3824 | } | 4100 | } |
3825 | 4101 | ||
@@ -4447,9 +4723,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4447 | struct ftrace_ops *op; | 4723 | struct ftrace_ops *op; |
4448 | int bit; | 4724 | int bit; |
4449 | 4725 | ||
4450 | if (function_trace_stop) | ||
4451 | return; | ||
4452 | |||
4453 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | 4726 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
4454 | if (bit < 0) | 4727 | if (bit < 0) |
4455 | return; | 4728 | return; |
@@ -4461,9 +4734,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4461 | preempt_disable_notrace(); | 4734 | preempt_disable_notrace(); |
4462 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 4735 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4463 | if (ftrace_ops_test(op, ip, regs)) { | 4736 | if (ftrace_ops_test(op, ip, regs)) { |
4464 | if (WARN_ON(!op->func)) { | 4737 | if (FTRACE_WARN_ON(!op->func)) { |
4465 | function_trace_stop = 1; | 4738 | pr_warn("op=%p %pS\n", op, op); |
4466 | printk("op=%p %pS\n", op, op); | ||
4467 | goto out; | 4739 | goto out; |
4468 | } | 4740 | } |
4469 | op->func(ip, parent_ip, op, regs); | 4741 | op->func(ip, parent_ip, op, regs); |
@@ -5084,6 +5356,12 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
5084 | /* Function graph doesn't use the .func field of global_ops */ | 5356 | /* Function graph doesn't use the .func field of global_ops */ |
5085 | global_ops.flags |= FTRACE_OPS_FL_STUB; | 5357 | global_ops.flags |= FTRACE_OPS_FL_STUB; |
5086 | 5358 | ||
5359 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
5360 | /* Optimize function graph calling (if implemented by arch) */ | ||
5361 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
5362 | global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR; | ||
5363 | #endif | ||
5364 | |||
5087 | ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); | 5365 | ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); |
5088 | 5366 | ||
5089 | out: | 5367 | out: |
@@ -5104,6 +5382,10 @@ void unregister_ftrace_graph(void) | |||
5104 | __ftrace_graph_entry = ftrace_graph_entry_stub; | 5382 | __ftrace_graph_entry = ftrace_graph_entry_stub; |
5105 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); | 5383 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); |
5106 | global_ops.flags &= ~FTRACE_OPS_FL_STUB; | 5384 | global_ops.flags &= ~FTRACE_OPS_FL_STUB; |
5385 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
5386 | if (FTRACE_GRAPH_TRAMP_ADDR != 0) | ||
5387 | global_ops.trampoline = 0; | ||
5388 | #endif | ||
5107 | unregister_pm_notifier(&ftrace_suspend_notifier); | 5389 | unregister_pm_notifier(&ftrace_suspend_notifier); |
5108 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 5390 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
5109 | 5391 | ||
@@ -5183,9 +5465,4 @@ void ftrace_graph_exit_task(struct task_struct *t) | |||
5183 | 5465 | ||
5184 | kfree(ret_stack); | 5466 | kfree(ret_stack); |
5185 | } | 5467 | } |
5186 | |||
5187 | void ftrace_graph_stop(void) | ||
5188 | { | ||
5189 | ftrace_stop(); | ||
5190 | } | ||
5191 | #endif | 5468 | #endif |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index ff7027199a9a..925f629658d6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1689,22 +1689,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | |||
1689 | if (!cpu_buffer->nr_pages_to_update) | 1689 | if (!cpu_buffer->nr_pages_to_update) |
1690 | continue; | 1690 | continue; |
1691 | 1691 | ||
1692 | /* The update must run on the CPU that is being updated. */ | 1692 | /* Can't run something on an offline CPU. */ |
1693 | preempt_disable(); | 1693 | if (!cpu_online(cpu)) { |
1694 | if (cpu == smp_processor_id() || !cpu_online(cpu)) { | ||
1695 | rb_update_pages(cpu_buffer); | 1694 | rb_update_pages(cpu_buffer); |
1696 | cpu_buffer->nr_pages_to_update = 0; | 1695 | cpu_buffer->nr_pages_to_update = 0; |
1697 | } else { | 1696 | } else { |
1698 | /* | ||
1699 | * Can not disable preemption for schedule_work_on() | ||
1700 | * on PREEMPT_RT. | ||
1701 | */ | ||
1702 | preempt_enable(); | ||
1703 | schedule_work_on(cpu, | 1697 | schedule_work_on(cpu, |
1704 | &cpu_buffer->update_pages_work); | 1698 | &cpu_buffer->update_pages_work); |
1705 | preempt_disable(); | ||
1706 | } | 1699 | } |
1707 | preempt_enable(); | ||
1708 | } | 1700 | } |
1709 | 1701 | ||
1710 | /* wait for all the updates to complete */ | 1702 | /* wait for all the updates to complete */ |
@@ -1742,22 +1734,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | |||
1742 | 1734 | ||
1743 | get_online_cpus(); | 1735 | get_online_cpus(); |
1744 | 1736 | ||
1745 | preempt_disable(); | 1737 | /* Can't run something on an offline CPU. */ |
1746 | /* The update must run on the CPU that is being updated. */ | 1738 | if (!cpu_online(cpu_id)) |
1747 | if (cpu_id == smp_processor_id() || !cpu_online(cpu_id)) | ||
1748 | rb_update_pages(cpu_buffer); | 1739 | rb_update_pages(cpu_buffer); |
1749 | else { | 1740 | else { |
1750 | /* | ||
1751 | * Can not disable preemption for schedule_work_on() | ||
1752 | * on PREEMPT_RT. | ||
1753 | */ | ||
1754 | preempt_enable(); | ||
1755 | schedule_work_on(cpu_id, | 1741 | schedule_work_on(cpu_id, |
1756 | &cpu_buffer->update_pages_work); | 1742 | &cpu_buffer->update_pages_work); |
1757 | wait_for_completion(&cpu_buffer->update_done); | 1743 | wait_for_completion(&cpu_buffer->update_done); |
1758 | preempt_disable(); | ||
1759 | } | 1744 | } |
1760 | preempt_enable(); | ||
1761 | 1745 | ||
1762 | cpu_buffer->nr_pages_to_update = 0; | 1746 | cpu_buffer->nr_pages_to_update = 0; |
1763 | put_online_cpus(); | 1747 | put_online_cpus(); |
@@ -3775,7 +3759,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
3775 | if (rb_per_cpu_empty(cpu_buffer)) | 3759 | if (rb_per_cpu_empty(cpu_buffer)) |
3776 | return NULL; | 3760 | return NULL; |
3777 | 3761 | ||
3778 | if (iter->head >= local_read(&iter->head_page->page->commit)) { | 3762 | if (iter->head >= rb_page_size(iter->head_page)) { |
3779 | rb_inc_iter(iter); | 3763 | rb_inc_iter(iter); |
3780 | goto again; | 3764 | goto again; |
3781 | } | 3765 | } |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 291397e66669..8bb80fe08767 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -937,30 +937,6 @@ out: | |||
937 | return ret; | 937 | return ret; |
938 | } | 938 | } |
939 | 939 | ||
940 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | ||
941 | { | ||
942 | int len; | ||
943 | int ret; | ||
944 | |||
945 | if (!cnt) | ||
946 | return 0; | ||
947 | |||
948 | if (s->len <= s->readpos) | ||
949 | return -EBUSY; | ||
950 | |||
951 | len = s->len - s->readpos; | ||
952 | if (cnt > len) | ||
953 | cnt = len; | ||
954 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | ||
955 | if (ret == cnt) | ||
956 | return -EFAULT; | ||
957 | |||
958 | cnt -= ret; | ||
959 | |||
960 | s->readpos += cnt; | ||
961 | return cnt; | ||
962 | } | ||
963 | |||
964 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | 940 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) |
965 | { | 941 | { |
966 | int len; | 942 | int len; |
@@ -3699,6 +3675,7 @@ static const char readme_msg[] = | |||
3699 | #endif | 3675 | #endif |
3700 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3676 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
3701 | " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" | 3677 | " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" |
3678 | " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" | ||
3702 | " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" | 3679 | " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" |
3703 | #endif | 3680 | #endif |
3704 | #ifdef CONFIG_TRACER_SNAPSHOT | 3681 | #ifdef CONFIG_TRACER_SNAPSHOT |
@@ -4238,10 +4215,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
4238 | } | 4215 | } |
4239 | 4216 | ||
4240 | static ssize_t | 4217 | static ssize_t |
4241 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | 4218 | tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, |
4242 | size_t cnt, loff_t *ppos) | 4219 | size_t cnt, loff_t *ppos) |
4243 | { | 4220 | { |
4244 | unsigned long *ptr = filp->private_data; | ||
4245 | char buf[64]; | 4221 | char buf[64]; |
4246 | int r; | 4222 | int r; |
4247 | 4223 | ||
@@ -4253,10 +4229,9 @@ tracing_max_lat_read(struct file *filp, char __user *ubuf, | |||
4253 | } | 4229 | } |
4254 | 4230 | ||
4255 | static ssize_t | 4231 | static ssize_t |
4256 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 4232 | tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, |
4257 | size_t cnt, loff_t *ppos) | 4233 | size_t cnt, loff_t *ppos) |
4258 | { | 4234 | { |
4259 | unsigned long *ptr = filp->private_data; | ||
4260 | unsigned long val; | 4235 | unsigned long val; |
4261 | int ret; | 4236 | int ret; |
4262 | 4237 | ||
@@ -4269,6 +4244,52 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
4269 | return cnt; | 4244 | return cnt; |
4270 | } | 4245 | } |
4271 | 4246 | ||
4247 | static ssize_t | ||
4248 | tracing_thresh_read(struct file *filp, char __user *ubuf, | ||
4249 | size_t cnt, loff_t *ppos) | ||
4250 | { | ||
4251 | return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); | ||
4252 | } | ||
4253 | |||
4254 | static ssize_t | ||
4255 | tracing_thresh_write(struct file *filp, const char __user *ubuf, | ||
4256 | size_t cnt, loff_t *ppos) | ||
4257 | { | ||
4258 | struct trace_array *tr = filp->private_data; | ||
4259 | int ret; | ||
4260 | |||
4261 | mutex_lock(&trace_types_lock); | ||
4262 | ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); | ||
4263 | if (ret < 0) | ||
4264 | goto out; | ||
4265 | |||
4266 | if (tr->current_trace->update_thresh) { | ||
4267 | ret = tr->current_trace->update_thresh(tr); | ||
4268 | if (ret < 0) | ||
4269 | goto out; | ||
4270 | } | ||
4271 | |||
4272 | ret = cnt; | ||
4273 | out: | ||
4274 | mutex_unlock(&trace_types_lock); | ||
4275 | |||
4276 | return ret; | ||
4277 | } | ||
4278 | |||
4279 | static ssize_t | ||
4280 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | ||
4281 | size_t cnt, loff_t *ppos) | ||
4282 | { | ||
4283 | return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); | ||
4284 | } | ||
4285 | |||
4286 | static ssize_t | ||
4287 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | ||
4288 | size_t cnt, loff_t *ppos) | ||
4289 | { | ||
4290 | return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); | ||
4291 | } | ||
4292 | |||
4272 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 4293 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
4273 | { | 4294 | { |
4274 | struct trace_array *tr = inode->i_private; | 4295 | struct trace_array *tr = inode->i_private; |
@@ -5170,6 +5191,13 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp) | |||
5170 | #endif /* CONFIG_TRACER_SNAPSHOT */ | 5191 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
5171 | 5192 | ||
5172 | 5193 | ||
5194 | static const struct file_operations tracing_thresh_fops = { | ||
5195 | .open = tracing_open_generic, | ||
5196 | .read = tracing_thresh_read, | ||
5197 | .write = tracing_thresh_write, | ||
5198 | .llseek = generic_file_llseek, | ||
5199 | }; | ||
5200 | |||
5173 | static const struct file_operations tracing_max_lat_fops = { | 5201 | static const struct file_operations tracing_max_lat_fops = { |
5174 | .open = tracing_open_generic, | 5202 | .open = tracing_open_generic, |
5175 | .read = tracing_max_lat_read, | 5203 | .read = tracing_max_lat_read, |
@@ -6107,10 +6135,8 @@ destroy_trace_option_files(struct trace_option_dentry *topts) | |||
6107 | if (!topts) | 6135 | if (!topts) |
6108 | return; | 6136 | return; |
6109 | 6137 | ||
6110 | for (cnt = 0; topts[cnt].opt; cnt++) { | 6138 | for (cnt = 0; topts[cnt].opt; cnt++) |
6111 | if (topts[cnt].entry) | 6139 | debugfs_remove(topts[cnt].entry); |
6112 | debugfs_remove(topts[cnt].entry); | ||
6113 | } | ||
6114 | 6140 | ||
6115 | kfree(topts); | 6141 | kfree(topts); |
6116 | } | 6142 | } |
@@ -6533,7 +6559,7 @@ static __init int tracer_init_debugfs(void) | |||
6533 | init_tracer_debugfs(&global_trace, d_tracer); | 6559 | init_tracer_debugfs(&global_trace, d_tracer); |
6534 | 6560 | ||
6535 | trace_create_file("tracing_thresh", 0644, d_tracer, | 6561 | trace_create_file("tracing_thresh", 0644, d_tracer, |
6536 | &tracing_thresh, &tracing_max_lat_fops); | 6562 | &global_trace, &tracing_thresh_fops); |
6537 | 6563 | ||
6538 | trace_create_file("README", 0444, d_tracer, | 6564 | trace_create_file("README", 0444, d_tracer, |
6539 | NULL, &tracing_readme_fops); | 6565 | NULL, &tracing_readme_fops); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 9258f5a815db..385391fb1d3b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -339,6 +339,7 @@ struct tracer_flags { | |||
339 | * @reset: called when one switches to another tracer | 339 | * @reset: called when one switches to another tracer |
340 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) | 340 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) |
341 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | 341 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) |
342 | * @update_thresh: called when tracing_thresh is updated | ||
342 | * @open: called when the trace file is opened | 343 | * @open: called when the trace file is opened |
343 | * @pipe_open: called when the trace_pipe file is opened | 344 | * @pipe_open: called when the trace_pipe file is opened |
344 | * @close: called when the trace file is released | 345 | * @close: called when the trace file is released |
@@ -357,6 +358,7 @@ struct tracer { | |||
357 | void (*reset)(struct trace_array *tr); | 358 | void (*reset)(struct trace_array *tr); |
358 | void (*start)(struct trace_array *tr); | 359 | void (*start)(struct trace_array *tr); |
359 | void (*stop)(struct trace_array *tr); | 360 | void (*stop)(struct trace_array *tr); |
361 | int (*update_thresh)(struct trace_array *tr); | ||
360 | void (*open)(struct trace_iterator *iter); | 362 | void (*open)(struct trace_iterator *iter); |
361 | void (*pipe_open)(struct trace_iterator *iter); | 363 | void (*pipe_open)(struct trace_iterator *iter); |
362 | void (*close)(struct trace_iterator *iter); | 364 | void (*close)(struct trace_iterator *iter); |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 2de53628689f..3154eb39241d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) fmt | ||
12 | |||
11 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
12 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
13 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
@@ -1491,7 +1493,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
1491 | 1493 | ||
1492 | dir->entry = debugfs_create_dir(name, parent); | 1494 | dir->entry = debugfs_create_dir(name, parent); |
1493 | if (!dir->entry) { | 1495 | if (!dir->entry) { |
1494 | pr_warning("Failed to create system directory %s\n", name); | 1496 | pr_warn("Failed to create system directory %s\n", name); |
1495 | __put_system(system); | 1497 | __put_system(system); |
1496 | goto out_free; | 1498 | goto out_free; |
1497 | } | 1499 | } |
@@ -1507,7 +1509,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
1507 | if (!entry) { | 1509 | if (!entry) { |
1508 | kfree(system->filter); | 1510 | kfree(system->filter); |
1509 | system->filter = NULL; | 1511 | system->filter = NULL; |
1510 | pr_warning("Could not create debugfs '%s/filter' entry\n", name); | 1512 | pr_warn("Could not create debugfs '%s/filter' entry\n", name); |
1511 | } | 1513 | } |
1512 | 1514 | ||
1513 | trace_create_file("enable", 0644, dir->entry, dir, | 1515 | trace_create_file("enable", 0644, dir->entry, dir, |
@@ -1522,8 +1524,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
1522 | out_fail: | 1524 | out_fail: |
1523 | /* Only print this message if failed on memory allocation */ | 1525 | /* Only print this message if failed on memory allocation */ |
1524 | if (!dir || !system) | 1526 | if (!dir || !system) |
1525 | pr_warning("No memory to create event subsystem %s\n", | 1527 | pr_warn("No memory to create event subsystem %s\n", name); |
1526 | name); | ||
1527 | return NULL; | 1528 | return NULL; |
1528 | } | 1529 | } |
1529 | 1530 | ||
@@ -1551,8 +1552,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
1551 | name = ftrace_event_name(call); | 1552 | name = ftrace_event_name(call); |
1552 | file->dir = debugfs_create_dir(name, d_events); | 1553 | file->dir = debugfs_create_dir(name, d_events); |
1553 | if (!file->dir) { | 1554 | if (!file->dir) { |
1554 | pr_warning("Could not create debugfs '%s' directory\n", | 1555 | pr_warn("Could not create debugfs '%s' directory\n", name); |
1555 | name); | ||
1556 | return -1; | 1556 | return -1; |
1557 | } | 1557 | } |
1558 | 1558 | ||
@@ -1575,8 +1575,8 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
1575 | if (list_empty(head)) { | 1575 | if (list_empty(head)) { |
1576 | ret = call->class->define_fields(call); | 1576 | ret = call->class->define_fields(call); |
1577 | if (ret < 0) { | 1577 | if (ret < 0) { |
1578 | pr_warning("Could not initialize trace point" | 1578 | pr_warn("Could not initialize trace point events/%s\n", |
1579 | " events/%s\n", name); | 1579 | name); |
1580 | return -1; | 1580 | return -1; |
1581 | } | 1581 | } |
1582 | } | 1582 | } |
@@ -1649,8 +1649,7 @@ static int event_init(struct ftrace_event_call *call) | |||
1649 | if (call->class->raw_init) { | 1649 | if (call->class->raw_init) { |
1650 | ret = call->class->raw_init(call); | 1650 | ret = call->class->raw_init(call); |
1651 | if (ret < 0 && ret != -ENOSYS) | 1651 | if (ret < 0 && ret != -ENOSYS) |
1652 | pr_warn("Could not initialize trace events/%s\n", | 1652 | pr_warn("Could not initialize trace events/%s\n", name); |
1653 | name); | ||
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | return ret; | 1655 | return ret; |
@@ -1895,8 +1894,8 @@ __trace_add_event_dirs(struct trace_array *tr) | |||
1895 | list_for_each_entry(call, &ftrace_events, list) { | 1894 | list_for_each_entry(call, &ftrace_events, list) { |
1896 | ret = __trace_add_new_event(call, tr); | 1895 | ret = __trace_add_new_event(call, tr); |
1897 | if (ret < 0) | 1896 | if (ret < 0) |
1898 | pr_warning("Could not create directory for event %s\n", | 1897 | pr_warn("Could not create directory for event %s\n", |
1899 | ftrace_event_name(call)); | 1898 | ftrace_event_name(call)); |
1900 | } | 1899 | } |
1901 | } | 1900 | } |
1902 | 1901 | ||
@@ -2208,8 +2207,8 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
2208 | list_for_each_entry(file, &tr->events, list) { | 2207 | list_for_each_entry(file, &tr->events, list) { |
2209 | ret = event_create_dir(tr->event_dir, file); | 2208 | ret = event_create_dir(tr->event_dir, file); |
2210 | if (ret < 0) | 2209 | if (ret < 0) |
2211 | pr_warning("Could not create directory for event %s\n", | 2210 | pr_warn("Could not create directory for event %s\n", |
2212 | ftrace_event_name(file->event_call)); | 2211 | ftrace_event_name(file->event_call)); |
2213 | } | 2212 | } |
2214 | } | 2213 | } |
2215 | 2214 | ||
@@ -2232,8 +2231,8 @@ __trace_early_add_events(struct trace_array *tr) | |||
2232 | 2231 | ||
2233 | ret = __trace_early_add_new_event(call, tr); | 2232 | ret = __trace_early_add_new_event(call, tr); |
2234 | if (ret < 0) | 2233 | if (ret < 0) |
2235 | pr_warning("Could not create early event %s\n", | 2234 | pr_warn("Could not create early event %s\n", |
2236 | ftrace_event_name(call)); | 2235 | ftrace_event_name(call)); |
2237 | } | 2236 | } |
2238 | } | 2237 | } |
2239 | 2238 | ||
@@ -2280,13 +2279,13 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) | |||
2280 | entry = debugfs_create_file("set_event", 0644, parent, | 2279 | entry = debugfs_create_file("set_event", 0644, parent, |
2281 | tr, &ftrace_set_event_fops); | 2280 | tr, &ftrace_set_event_fops); |
2282 | if (!entry) { | 2281 | if (!entry) { |
2283 | pr_warning("Could not create debugfs 'set_event' entry\n"); | 2282 | pr_warn("Could not create debugfs 'set_event' entry\n"); |
2284 | return -ENOMEM; | 2283 | return -ENOMEM; |
2285 | } | 2284 | } |
2286 | 2285 | ||
2287 | d_events = debugfs_create_dir("events", parent); | 2286 | d_events = debugfs_create_dir("events", parent); |
2288 | if (!d_events) { | 2287 | if (!d_events) { |
2289 | pr_warning("Could not create debugfs 'events' directory\n"); | 2288 | pr_warn("Could not create debugfs 'events' directory\n"); |
2290 | return -ENOMEM; | 2289 | return -ENOMEM; |
2291 | } | 2290 | } |
2292 | 2291 | ||
@@ -2462,11 +2461,10 @@ static __init int event_trace_init(void) | |||
2462 | entry = debugfs_create_file("available_events", 0444, d_tracer, | 2461 | entry = debugfs_create_file("available_events", 0444, d_tracer, |
2463 | tr, &ftrace_avail_fops); | 2462 | tr, &ftrace_avail_fops); |
2464 | if (!entry) | 2463 | if (!entry) |
2465 | pr_warning("Could not create debugfs " | 2464 | pr_warn("Could not create debugfs 'available_events' entry\n"); |
2466 | "'available_events' entry\n"); | ||
2467 | 2465 | ||
2468 | if (trace_define_common_fields()) | 2466 | if (trace_define_common_fields()) |
2469 | pr_warning("tracing: Failed to allocate common fields"); | 2467 | pr_warn("tracing: Failed to allocate common fields"); |
2470 | 2468 | ||
2471 | ret = early_event_add_tracer(d_tracer, tr); | 2469 | ret = early_event_add_tracer(d_tracer, tr); |
2472 | if (ret) | 2470 | if (ret) |
@@ -2475,7 +2473,7 @@ static __init int event_trace_init(void) | |||
2475 | #ifdef CONFIG_MODULES | 2473 | #ifdef CONFIG_MODULES |
2476 | ret = register_module_notifier(&trace_module_nb); | 2474 | ret = register_module_notifier(&trace_module_nb); |
2477 | if (ret) | 2475 | if (ret) |
2478 | pr_warning("Failed to register trace events module notifier\n"); | 2476 | pr_warn("Failed to register trace events module notifier\n"); |
2479 | #endif | 2477 | #endif |
2480 | return 0; | 2478 | return 0; |
2481 | } | 2479 | } |
@@ -2579,7 +2577,7 @@ static __init void event_trace_self_tests(void) | |||
2579 | * it and the self test should not be on. | 2577 | * it and the self test should not be on. |
2580 | */ | 2578 | */ |
2581 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { | 2579 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { |
2582 | pr_warning("Enabled event during self test!\n"); | 2580 | pr_warn("Enabled event during self test!\n"); |
2583 | WARN_ON_ONCE(1); | 2581 | WARN_ON_ONCE(1); |
2584 | continue; | 2582 | continue; |
2585 | } | 2583 | } |
@@ -2607,8 +2605,8 @@ static __init void event_trace_self_tests(void) | |||
2607 | 2605 | ||
2608 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); | 2606 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); |
2609 | if (WARN_ON_ONCE(ret)) { | 2607 | if (WARN_ON_ONCE(ret)) { |
2610 | pr_warning("error enabling system %s\n", | 2608 | pr_warn("error enabling system %s\n", |
2611 | system->name); | 2609 | system->name); |
2612 | continue; | 2610 | continue; |
2613 | } | 2611 | } |
2614 | 2612 | ||
@@ -2616,8 +2614,8 @@ static __init void event_trace_self_tests(void) | |||
2616 | 2614 | ||
2617 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); | 2615 | ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); |
2618 | if (WARN_ON_ONCE(ret)) { | 2616 | if (WARN_ON_ONCE(ret)) { |
2619 | pr_warning("error disabling system %s\n", | 2617 | pr_warn("error disabling system %s\n", |
2620 | system->name); | 2618 | system->name); |
2621 | continue; | 2619 | continue; |
2622 | } | 2620 | } |
2623 | 2621 | ||
@@ -2631,7 +2629,7 @@ static __init void event_trace_self_tests(void) | |||
2631 | 2629 | ||
2632 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); | 2630 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); |
2633 | if (WARN_ON_ONCE(ret)) { | 2631 | if (WARN_ON_ONCE(ret)) { |
2634 | pr_warning("error enabling all events\n"); | 2632 | pr_warn("error enabling all events\n"); |
2635 | return; | 2633 | return; |
2636 | } | 2634 | } |
2637 | 2635 | ||
@@ -2640,7 +2638,7 @@ static __init void event_trace_self_tests(void) | |||
2640 | /* reset sysname */ | 2638 | /* reset sysname */ |
2641 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); | 2639 | ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); |
2642 | if (WARN_ON_ONCE(ret)) { | 2640 | if (WARN_ON_ONCE(ret)) { |
2643 | pr_warning("error disabling all events\n"); | 2641 | pr_warn("error disabling all events\n"); |
2644 | return; | 2642 | return; |
2645 | } | 2643 | } |
2646 | 2644 | ||
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 4de3e57f723c..f0a0c982cde3 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -15,6 +15,33 @@ | |||
15 | #include "trace.h" | 15 | #include "trace.h" |
16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
17 | 17 | ||
18 | static bool kill_ftrace_graph; | ||
19 | |||
20 | /** | ||
21 | * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called | ||
22 | * | ||
23 | * ftrace_graph_stop() is called when a severe error is detected in | ||
24 | * the function graph tracing. This function is called by the critical | ||
25 | * paths of function graph to keep those paths from doing any more harm. | ||
26 | */ | ||
27 | bool ftrace_graph_is_dead(void) | ||
28 | { | ||
29 | return kill_ftrace_graph; | ||
30 | } | ||
31 | |||
32 | /** | ||
33 | * ftrace_graph_stop - set to permanently disable function graph tracincg | ||
34 | * | ||
35 | * In case of an error int function graph tracing, this is called | ||
36 | * to try to keep function graph tracing from causing any more harm. | ||
37 | * Usually this is pretty severe and this is called to try to at least | ||
38 | * get a warning out to the user. | ||
39 | */ | ||
40 | void ftrace_graph_stop(void) | ||
41 | { | ||
42 | kill_ftrace_graph = true; | ||
43 | } | ||
44 | |||
18 | /* When set, irq functions will be ignored */ | 45 | /* When set, irq functions will be ignored */ |
19 | static int ftrace_graph_skip_irqs; | 46 | static int ftrace_graph_skip_irqs; |
20 | 47 | ||
@@ -92,6 +119,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | |||
92 | unsigned long long calltime; | 119 | unsigned long long calltime; |
93 | int index; | 120 | int index; |
94 | 121 | ||
122 | if (unlikely(ftrace_graph_is_dead())) | ||
123 | return -EBUSY; | ||
124 | |||
95 | if (!current->ret_stack) | 125 | if (!current->ret_stack) |
96 | return -EBUSY; | 126 | return -EBUSY; |
97 | 127 | ||
@@ -323,7 +353,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
323 | return ret; | 353 | return ret; |
324 | } | 354 | } |
325 | 355 | ||
326 | int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) | 356 | static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace) |
327 | { | 357 | { |
328 | if (tracing_thresh) | 358 | if (tracing_thresh) |
329 | return 1; | 359 | return 1; |
@@ -412,7 +442,7 @@ void set_graph_array(struct trace_array *tr) | |||
412 | smp_mb(); | 442 | smp_mb(); |
413 | } | 443 | } |
414 | 444 | ||
415 | void trace_graph_thresh_return(struct ftrace_graph_ret *trace) | 445 | static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) |
416 | { | 446 | { |
417 | if (tracing_thresh && | 447 | if (tracing_thresh && |
418 | (trace->rettime - trace->calltime < tracing_thresh)) | 448 | (trace->rettime - trace->calltime < tracing_thresh)) |
@@ -445,6 +475,12 @@ static void graph_trace_reset(struct trace_array *tr) | |||
445 | unregister_ftrace_graph(); | 475 | unregister_ftrace_graph(); |
446 | } | 476 | } |
447 | 477 | ||
478 | static int graph_trace_update_thresh(struct trace_array *tr) | ||
479 | { | ||
480 | graph_trace_reset(tr); | ||
481 | return graph_trace_init(tr); | ||
482 | } | ||
483 | |||
448 | static int max_bytes_for_cpu; | 484 | static int max_bytes_for_cpu; |
449 | 485 | ||
450 | static enum print_line_t | 486 | static enum print_line_t |
@@ -1399,7 +1435,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
1399 | seq_printf(s, " | | | |\n"); | 1435 | seq_printf(s, " | | | |\n"); |
1400 | } | 1436 | } |
1401 | 1437 | ||
1402 | void print_graph_headers(struct seq_file *s) | 1438 | static void print_graph_headers(struct seq_file *s) |
1403 | { | 1439 | { |
1404 | print_graph_headers_flags(s, tracer_flags.val); | 1440 | print_graph_headers_flags(s, tracer_flags.val); |
1405 | } | 1441 | } |
@@ -1495,6 +1531,7 @@ static struct trace_event graph_trace_ret_event = { | |||
1495 | 1531 | ||
1496 | static struct tracer graph_trace __tracer_data = { | 1532 | static struct tracer graph_trace __tracer_data = { |
1497 | .name = "function_graph", | 1533 | .name = "function_graph", |
1534 | .update_thresh = graph_trace_update_thresh, | ||
1498 | .open = graph_trace_open, | 1535 | .open = graph_trace_open, |
1499 | .pipe_open = graph_trace_open, | 1536 | .pipe_open = graph_trace_open, |
1500 | .close = graph_trace_close, | 1537 | .close = graph_trace_close, |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index f3dad80c20b2..c6977d5a9b12 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -20,23 +20,6 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | |||
20 | 20 | ||
21 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 21 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
22 | 22 | ||
23 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
24 | { | ||
25 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | ||
26 | int ret; | ||
27 | |||
28 | ret = seq_write(m, s->buffer, len); | ||
29 | |||
30 | /* | ||
31 | * Only reset this buffer if we successfully wrote to the | ||
32 | * seq_file buffer. | ||
33 | */ | ||
34 | if (!ret) | ||
35 | trace_seq_init(s); | ||
36 | |||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) | 23 | enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) |
41 | { | 24 | { |
42 | struct trace_seq *s = &iter->seq; | 25 | struct trace_seq *s = &iter->seq; |
@@ -85,257 +68,6 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
85 | return TRACE_TYPE_HANDLED; | 68 | return TRACE_TYPE_HANDLED; |
86 | } | 69 | } |
87 | 70 | ||
88 | /** | ||
89 | * trace_seq_printf - sequence printing of trace information | ||
90 | * @s: trace sequence descriptor | ||
91 | * @fmt: printf format string | ||
92 | * | ||
93 | * It returns 0 if the trace oversizes the buffer's free | ||
94 | * space, 1 otherwise. | ||
95 | * | ||
96 | * The tracer may use either sequence operations or its own | ||
97 | * copy to user routines. To simplify formating of a trace | ||
98 | * trace_seq_printf is used to store strings into a special | ||
99 | * buffer (@s). Then the output may be either used by | ||
100 | * the sequencer or pulled into another buffer. | ||
101 | */ | ||
102 | int | ||
103 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
104 | { | ||
105 | int len = (PAGE_SIZE - 1) - s->len; | ||
106 | va_list ap; | ||
107 | int ret; | ||
108 | |||
109 | if (s->full || !len) | ||
110 | return 0; | ||
111 | |||
112 | va_start(ap, fmt); | ||
113 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
114 | va_end(ap); | ||
115 | |||
116 | /* If we can't write it all, don't bother writing anything */ | ||
117 | if (ret >= len) { | ||
118 | s->full = 1; | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | s->len += ret; | ||
123 | |||
124 | return 1; | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(trace_seq_printf); | ||
127 | |||
128 | /** | ||
129 | * trace_seq_bitmask - put a list of longs as a bitmask print output | ||
130 | * @s: trace sequence descriptor | ||
131 | * @maskp: points to an array of unsigned longs that represent a bitmask | ||
132 | * @nmaskbits: The number of bits that are valid in @maskp | ||
133 | * | ||
134 | * It returns 0 if the trace oversizes the buffer's free | ||
135 | * space, 1 otherwise. | ||
136 | * | ||
137 | * Writes a ASCII representation of a bitmask string into @s. | ||
138 | */ | ||
139 | int | ||
140 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | ||
141 | int nmaskbits) | ||
142 | { | ||
143 | int len = (PAGE_SIZE - 1) - s->len; | ||
144 | int ret; | ||
145 | |||
146 | if (s->full || !len) | ||
147 | return 0; | ||
148 | |||
149 | ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); | ||
150 | s->len += ret; | ||
151 | |||
152 | return 1; | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); | ||
155 | |||
156 | /** | ||
157 | * trace_seq_vprintf - sequence printing of trace information | ||
158 | * @s: trace sequence descriptor | ||
159 | * @fmt: printf format string | ||
160 | * | ||
161 | * The tracer may use either sequence operations or its own | ||
162 | * copy to user routines. To simplify formating of a trace | ||
163 | * trace_seq_printf is used to store strings into a special | ||
164 | * buffer (@s). Then the output may be either used by | ||
165 | * the sequencer or pulled into another buffer. | ||
166 | */ | ||
167 | int | ||
168 | trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | ||
169 | { | ||
170 | int len = (PAGE_SIZE - 1) - s->len; | ||
171 | int ret; | ||
172 | |||
173 | if (s->full || !len) | ||
174 | return 0; | ||
175 | |||
176 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); | ||
177 | |||
178 | /* If we can't write it all, don't bother writing anything */ | ||
179 | if (ret >= len) { | ||
180 | s->full = 1; | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | s->len += ret; | ||
185 | |||
186 | return len; | ||
187 | } | ||
188 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); | ||
189 | |||
190 | int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | ||
191 | { | ||
192 | int len = (PAGE_SIZE - 1) - s->len; | ||
193 | int ret; | ||
194 | |||
195 | if (s->full || !len) | ||
196 | return 0; | ||
197 | |||
198 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | ||
199 | |||
200 | /* If we can't write it all, don't bother writing anything */ | ||
201 | if (ret >= len) { | ||
202 | s->full = 1; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | s->len += ret; | ||
207 | |||
208 | return len; | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * trace_seq_puts - trace sequence printing of simple string | ||
213 | * @s: trace sequence descriptor | ||
214 | * @str: simple string to record | ||
215 | * | ||
216 | * The tracer may use either the sequence operations or its own | ||
217 | * copy to user routines. This function records a simple string | ||
218 | * into a special buffer (@s) for later retrieval by a sequencer | ||
219 | * or other mechanism. | ||
220 | */ | ||
221 | int trace_seq_puts(struct trace_seq *s, const char *str) | ||
222 | { | ||
223 | int len = strlen(str); | ||
224 | |||
225 | if (s->full) | ||
226 | return 0; | ||
227 | |||
228 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
229 | s->full = 1; | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | memcpy(s->buffer + s->len, str, len); | ||
234 | s->len += len; | ||
235 | |||
236 | return len; | ||
237 | } | ||
238 | |||
239 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
240 | { | ||
241 | if (s->full) | ||
242 | return 0; | ||
243 | |||
244 | if (s->len >= (PAGE_SIZE - 1)) { | ||
245 | s->full = 1; | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | s->buffer[s->len++] = c; | ||
250 | |||
251 | return 1; | ||
252 | } | ||
253 | EXPORT_SYMBOL(trace_seq_putc); | ||
254 | |||
255 | int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | ||
256 | { | ||
257 | if (s->full) | ||
258 | return 0; | ||
259 | |||
260 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
261 | s->full = 1; | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | memcpy(s->buffer + s->len, mem, len); | ||
266 | s->len += len; | ||
267 | |||
268 | return len; | ||
269 | } | ||
270 | |||
271 | int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len) | ||
272 | { | ||
273 | unsigned char hex[HEX_CHARS]; | ||
274 | const unsigned char *data = mem; | ||
275 | int i, j; | ||
276 | |||
277 | if (s->full) | ||
278 | return 0; | ||
279 | |||
280 | #ifdef __BIG_ENDIAN | ||
281 | for (i = 0, j = 0; i < len; i++) { | ||
282 | #else | ||
283 | for (i = len-1, j = 0; i >= 0; i--) { | ||
284 | #endif | ||
285 | hex[j++] = hex_asc_hi(data[i]); | ||
286 | hex[j++] = hex_asc_lo(data[i]); | ||
287 | } | ||
288 | hex[j++] = ' '; | ||
289 | |||
290 | return trace_seq_putmem(s, hex, j); | ||
291 | } | ||
292 | |||
293 | void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
294 | { | ||
295 | void *ret; | ||
296 | |||
297 | if (s->full) | ||
298 | return NULL; | ||
299 | |||
300 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
301 | s->full = 1; | ||
302 | return NULL; | ||
303 | } | ||
304 | |||
305 | ret = s->buffer + s->len; | ||
306 | s->len += len; | ||
307 | |||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | int trace_seq_path(struct trace_seq *s, const struct path *path) | ||
312 | { | ||
313 | unsigned char *p; | ||
314 | |||
315 | if (s->full) | ||
316 | return 0; | ||
317 | |||
318 | if (s->len >= (PAGE_SIZE - 1)) { | ||
319 | s->full = 1; | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
324 | if (!IS_ERR(p)) { | ||
325 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
326 | if (p) { | ||
327 | s->len = p - s->buffer; | ||
328 | return 1; | ||
329 | } | ||
330 | } else { | ||
331 | s->buffer[s->len++] = '?'; | ||
332 | return 1; | ||
333 | } | ||
334 | |||
335 | s->full = 1; | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | const char * | 71 | const char * |
340 | ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | 72 | ftrace_print_flags_seq(struct trace_seq *p, const char *delim, |
341 | unsigned long flags, | 73 | unsigned long flags, |
@@ -343,7 +75,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | |||
343 | { | 75 | { |
344 | unsigned long mask; | 76 | unsigned long mask; |
345 | const char *str; | 77 | const char *str; |
346 | const char *ret = p->buffer + p->len; | 78 | const char *ret = trace_seq_buffer_ptr(p); |
347 | int i, first = 1; | 79 | int i, first = 1; |
348 | 80 | ||
349 | for (i = 0; flag_array[i].name && flags; i++) { | 81 | for (i = 0; flag_array[i].name && flags; i++) { |
@@ -379,7 +111,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | |||
379 | const struct trace_print_flags *symbol_array) | 111 | const struct trace_print_flags *symbol_array) |
380 | { | 112 | { |
381 | int i; | 113 | int i; |
382 | const char *ret = p->buffer + p->len; | 114 | const char *ret = trace_seq_buffer_ptr(p); |
383 | 115 | ||
384 | for (i = 0; symbol_array[i].name; i++) { | 116 | for (i = 0; symbol_array[i].name; i++) { |
385 | 117 | ||
@@ -390,7 +122,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | |||
390 | break; | 122 | break; |
391 | } | 123 | } |
392 | 124 | ||
393 | if (ret == (const char *)(p->buffer + p->len)) | 125 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) |
394 | trace_seq_printf(p, "0x%lx", val); | 126 | trace_seq_printf(p, "0x%lx", val); |
395 | 127 | ||
396 | trace_seq_putc(p, 0); | 128 | trace_seq_putc(p, 0); |
@@ -405,7 +137,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, | |||
405 | const struct trace_print_flags_u64 *symbol_array) | 137 | const struct trace_print_flags_u64 *symbol_array) |
406 | { | 138 | { |
407 | int i; | 139 | int i; |
408 | const char *ret = p->buffer + p->len; | 140 | const char *ret = trace_seq_buffer_ptr(p); |
409 | 141 | ||
410 | for (i = 0; symbol_array[i].name; i++) { | 142 | for (i = 0; symbol_array[i].name; i++) { |
411 | 143 | ||
@@ -416,7 +148,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, | |||
416 | break; | 148 | break; |
417 | } | 149 | } |
418 | 150 | ||
419 | if (ret == (const char *)(p->buffer + p->len)) | 151 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) |
420 | trace_seq_printf(p, "0x%llx", val); | 152 | trace_seq_printf(p, "0x%llx", val); |
421 | 153 | ||
422 | trace_seq_putc(p, 0); | 154 | trace_seq_putc(p, 0); |
@@ -430,7 +162,7 @@ const char * | |||
430 | ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, | 162 | ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, |
431 | unsigned int bitmask_size) | 163 | unsigned int bitmask_size) |
432 | { | 164 | { |
433 | const char *ret = p->buffer + p->len; | 165 | const char *ret = trace_seq_buffer_ptr(p); |
434 | 166 | ||
435 | trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8); | 167 | trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8); |
436 | trace_seq_putc(p, 0); | 168 | trace_seq_putc(p, 0); |
@@ -443,7 +175,7 @@ const char * | |||
443 | ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) | 175 | ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) |
444 | { | 176 | { |
445 | int i; | 177 | int i; |
446 | const char *ret = p->buffer + p->len; | 178 | const char *ret = trace_seq_buffer_ptr(p); |
447 | 179 | ||
448 | for (i = 0; i < buf_len; i++) | 180 | for (i = 0; i < buf_len; i++) |
449 | trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]); | 181 | trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]); |
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index 127a9d8c8357..80b25b585a70 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h | |||
@@ -35,9 +35,6 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); | |||
35 | extern int __unregister_ftrace_event(struct trace_event *event); | 35 | extern int __unregister_ftrace_event(struct trace_event *event); |
36 | extern struct rw_semaphore trace_event_sem; | 36 | extern struct rw_semaphore trace_event_sem; |
37 | 37 | ||
38 | #define MAX_MEMHEX_BYTES 8 | ||
39 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
40 | |||
41 | #define SEQ_PUT_FIELD_RET(s, x) \ | 38 | #define SEQ_PUT_FIELD_RET(s, x) \ |
42 | do { \ | 39 | do { \ |
43 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | 40 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ |
@@ -46,7 +43,6 @@ do { \ | |||
46 | 43 | ||
47 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | 44 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ |
48 | do { \ | 45 | do { \ |
49 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
50 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | 46 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ |
51 | return TRACE_TYPE_PARTIAL_LINE; \ | 47 | return TRACE_TYPE_PARTIAL_LINE; \ |
52 | } while (0) | 48 | } while (0) |
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c new file mode 100644 index 000000000000..1f24ed99dca2 --- /dev/null +++ b/kernel/trace/trace_seq.c | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * trace_seq.c | ||
3 | * | ||
4 | * Copyright (C) 2008-2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
5 | * | ||
6 | * The trace_seq is a handy tool that allows you to pass a descriptor around | ||
7 | * to a buffer that other functions can write to. It is similar to the | ||
8 | * seq_file functionality but has some differences. | ||
9 | * | ||
10 | * To use it, the trace_seq must be initialized with trace_seq_init(). | ||
11 | * This will set up the counters within the descriptor. You can call | ||
12 | * trace_seq_init() more than once to reset the trace_seq to start | ||
13 | * from scratch. | ||
14 | * | ||
15 | * The buffer size is currently PAGE_SIZE, although it may become dynamic | ||
16 | * in the future. | ||
17 | * | ||
18 | * A write to the buffer will either succeed or fail. That is, unlike | ||
19 | * sprintf() there will not be a partial write (well it may write into | ||
20 | * the buffer but it won't update the pointers). This allows users to | ||
21 | * try to write something into the trace_seq buffer and if it fails | ||
22 | * they can flush it and try again. | ||
23 | * | ||
24 | */ | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/seq_file.h> | ||
27 | #include <linux/trace_seq.h> | ||
28 | |||
29 | /* How much buffer is left on the trace_seq? */ | ||
30 | #define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len) | ||
31 | |||
32 | /* How much buffer is written? */ | ||
33 | #define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1)) | ||
34 | |||
35 | /** | ||
36 | * trace_print_seq - move the contents of trace_seq into a seq_file | ||
37 | * @m: the seq_file descriptor that is the destination | ||
38 | * @s: the trace_seq descriptor that is the source. | ||
39 | * | ||
40 | * Returns 0 on success and non zero on error. If it succeeds to | ||
41 | * write to the seq_file it will reset the trace_seq, otherwise | ||
42 | * it does not modify the trace_seq to let the caller try again. | ||
43 | */ | ||
44 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
45 | { | ||
46 | unsigned int len = TRACE_SEQ_BUF_USED(s); | ||
47 | int ret; | ||
48 | |||
49 | ret = seq_write(m, s->buffer, len); | ||
50 | |||
51 | /* | ||
52 | * Only reset this buffer if we successfully wrote to the | ||
53 | * seq_file buffer. This lets the caller try again or | ||
54 | * do something else with the contents. | ||
55 | */ | ||
56 | if (!ret) | ||
57 | trace_seq_init(s); | ||
58 | |||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * trace_seq_printf - sequence printing of trace information | ||
64 | * @s: trace sequence descriptor | ||
65 | * @fmt: printf format string | ||
66 | * | ||
67 | * The tracer may use either sequence operations or its own | ||
68 | * copy to user routines. To simplify formatting of a trace | ||
69 | * trace_seq_printf() is used to store strings into a special | ||
70 | * buffer (@s). Then the output may be either used by | ||
71 | * the sequencer or pulled into another buffer. | ||
72 | * | ||
73 | * Returns 1 if we have successfully written all the contents to | ||
74 | * the buffer. | ||
75 | * Returns 0 if the length to write is bigger than the | ||
76 | * reserved buffer space. In this case, nothing gets written. | ||
77 | */ | ||
78 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
79 | { | ||
80 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | ||
81 | va_list ap; | ||
82 | int ret; | ||
83 | |||
84 | if (s->full || !len) | ||
85 | return 0; | ||
86 | |||
87 | va_start(ap, fmt); | ||
88 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
89 | va_end(ap); | ||
90 | |||
91 | /* If we can't write it all, don't bother writing anything */ | ||
92 | if (ret >= len) { | ||
93 | s->full = 1; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | s->len += ret; | ||
98 | |||
99 | return 1; | ||
100 | } | ||
101 | EXPORT_SYMBOL_GPL(trace_seq_printf); | ||
102 | |||
103 | /** | ||
104 | * trace_seq_bitmask - write a bitmask array in its ASCII representation | ||
105 | * @s: trace sequence descriptor | ||
106 | * @maskp: points to an array of unsigned longs that represent a bitmask | ||
107 | * @nmaskbits: The number of bits that are valid in @maskp | ||
108 | * | ||
109 | * Writes an ASCII representation of a bitmask string into @s. | ||
110 | * | ||
111 | * Returns 1 if we have successfully written all the contents to | ||
112 | * the buffer. | ||
113 | * Returns 0 if the length to write is bigger than the | ||
114 | * reserved buffer space. In this case, nothing gets written. | ||
115 | */ | ||
116 | int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | ||
117 | int nmaskbits) | ||
118 | { | ||
119 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | ||
120 | int ret; | ||
121 | |||
122 | if (s->full || !len) | ||
123 | return 0; | ||
124 | |||
125 | ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); | ||
126 | s->len += ret; | ||
127 | |||
128 | return 1; | ||
129 | } | ||
130 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); | ||
131 | |||
132 | /** | ||
133 | * trace_seq_vprintf - sequence printing of trace information | ||
134 | * @s: trace sequence descriptor | ||
135 | * @fmt: printf format string | ||
136 | * | ||
137 | * The tracer may use either sequence operations or its own | ||
138 | * copy to user routines. To simplify formatting of a trace | ||
139 | * trace_seq_printf is used to store strings into a special | ||
140 | * buffer (@s). Then the output may be either used by | ||
141 | * the sequencer or pulled into another buffer. | ||
142 | * | ||
143 | * Returns how much it wrote to the buffer. | ||
144 | */ | ||
145 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | ||
146 | { | ||
147 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | ||
148 | int ret; | ||
149 | |||
150 | if (s->full || !len) | ||
151 | return 0; | ||
152 | |||
153 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); | ||
154 | |||
155 | /* If we can't write it all, don't bother writing anything */ | ||
156 | if (ret >= len) { | ||
157 | s->full = 1; | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | s->len += ret; | ||
162 | |||
163 | return len; | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); | ||
166 | |||
167 | /** | ||
168 | * trace_seq_bprintf - Write the printf string from binary arguments | ||
169 | * @s: trace sequence descriptor | ||
170 | * @fmt: The format string for the @binary arguments | ||
171 | * @binary: The binary arguments for @fmt. | ||
172 | * | ||
173 | * When recording in a fast path, a printf may be recorded with just | ||
174 | * saving the format and the arguments as they were passed to the | ||
175 | * function, instead of wasting cycles converting the arguments into | ||
176 | * ASCII characters. Instead, the arguments are saved in a 32 bit | ||
177 | * word array that is defined by the format string constraints. | ||
178 | * | ||
179 | * This function will take the format and the binary array and finish | ||
180 | * the conversion into the ASCII string within the buffer. | ||
181 | * | ||
182 | * Returns how much it wrote to the buffer. | ||
183 | */ | ||
184 | int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | ||
185 | { | ||
186 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | ||
187 | int ret; | ||
188 | |||
189 | if (s->full || !len) | ||
190 | return 0; | ||
191 | |||
192 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | ||
193 | |||
194 | /* If we can't write it all, don't bother writing anything */ | ||
195 | if (ret >= len) { | ||
196 | s->full = 1; | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | s->len += ret; | ||
201 | |||
202 | return len; | ||
203 | } | ||
204 | EXPORT_SYMBOL_GPL(trace_seq_bprintf); | ||
205 | |||
206 | /** | ||
207 | * trace_seq_puts - trace sequence printing of simple string | ||
208 | * @s: trace sequence descriptor | ||
209 | * @str: simple string to record | ||
210 | * | ||
211 | * The tracer may use either the sequence operations or its own | ||
212 | * copy to user routines. This function records a simple string | ||
213 | * into a special buffer (@s) for later retrieval by a sequencer | ||
214 | * or other mechanism. | ||
215 | * | ||
216 | * Returns how much it wrote to the buffer. | ||
217 | */ | ||
218 | int trace_seq_puts(struct trace_seq *s, const char *str) | ||
219 | { | ||
220 | unsigned int len = strlen(str); | ||
221 | |||
222 | if (s->full) | ||
223 | return 0; | ||
224 | |||
225 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | ||
226 | s->full = 1; | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | memcpy(s->buffer + s->len, str, len); | ||
231 | s->len += len; | ||
232 | |||
233 | return len; | ||
234 | } | ||
235 | EXPORT_SYMBOL_GPL(trace_seq_puts); | ||
236 | |||
237 | /** | ||
238 | * trace_seq_putc - trace sequence printing of simple character | ||
239 | * @s: trace sequence descriptor | ||
240 | * @c: simple character to record | ||
241 | * | ||
242 | * The tracer may use either the sequence operations or its own | ||
243 | * copy to user routines. This function records a simple character | ||
244 | * into a special buffer (@s) for later retrieval by a sequencer | ||
245 | * or other mechanism. | ||
246 | * | ||
247 | * Returns how much it wrote to the buffer. | ||
248 | */ | ||
249 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
250 | { | ||
251 | if (s->full) | ||
252 | return 0; | ||
253 | |||
254 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { | ||
255 | s->full = 1; | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | s->buffer[s->len++] = c; | ||
260 | |||
261 | return 1; | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(trace_seq_putc); | ||
264 | |||
265 | /** | ||
266 | * trace_seq_putmem - write raw data into the trace_seq buffer | ||
267 | * @s: trace sequence descriptor | ||
268 | * @mem: The raw memory to copy into the buffer | ||
269 | * @len: The length of the raw memory to copy (in bytes) | ||
270 | * | ||
271 | * There may be cases where raw memory needs to be written into the | ||
272 | * buffer and a strcpy() would not work. Using this function allows | ||
273 | * for such cases. | ||
274 | * | ||
275 | * Returns how much it wrote to the buffer. | ||
276 | */ | ||
277 | int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | ||
278 | { | ||
279 | if (s->full) | ||
280 | return 0; | ||
281 | |||
282 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | ||
283 | s->full = 1; | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | memcpy(s->buffer + s->len, mem, len); | ||
288 | s->len += len; | ||
289 | |||
290 | return len; | ||
291 | } | ||
292 | EXPORT_SYMBOL_GPL(trace_seq_putmem); | ||
293 | |||
294 | #define MAX_MEMHEX_BYTES 8U | ||
295 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
296 | |||
297 | /** | ||
298 | * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex | ||
299 | * @s: trace sequence descriptor | ||
300 | * @mem: The raw memory to write its hex ASCII representation of | ||
301 | * @len: The length of the raw memory to copy (in bytes) | ||
302 | * | ||
303 | * This is similar to trace_seq_putmem() except instead of just copying the | ||
304 | * raw memory into the buffer it writes its ASCII representation of it | ||
305 | * in hex characters. | ||
306 | * | ||
307 | * Returns how much it wrote to the buffer. | ||
308 | */ | ||
309 | int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
310 | unsigned int len) | ||
311 | { | ||
312 | unsigned char hex[HEX_CHARS]; | ||
313 | const unsigned char *data = mem; | ||
314 | unsigned int start_len; | ||
315 | int i, j; | ||
316 | int cnt = 0; | ||
317 | |||
318 | if (s->full) | ||
319 | return 0; | ||
320 | |||
321 | while (len) { | ||
322 | start_len = min(len, HEX_CHARS - 1); | ||
323 | #ifdef __BIG_ENDIAN | ||
324 | for (i = 0, j = 0; i < start_len; i++) { | ||
325 | #else | ||
326 | for (i = start_len-1, j = 0; i >= 0; i--) { | ||
327 | #endif | ||
328 | hex[j++] = hex_asc_hi(data[i]); | ||
329 | hex[j++] = hex_asc_lo(data[i]); | ||
330 | } | ||
331 | if (WARN_ON_ONCE(j == 0 || j/2 > len)) | ||
332 | break; | ||
333 | |||
334 | /* j increments twice per loop */ | ||
335 | len -= j / 2; | ||
336 | hex[j++] = ' '; | ||
337 | |||
338 | cnt += trace_seq_putmem(s, hex, j); | ||
339 | } | ||
340 | return cnt; | ||
341 | } | ||
342 | EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); | ||
343 | |||
344 | /** | ||
345 | * trace_seq_path - copy a path into the sequence buffer | ||
346 | * @s: trace sequence descriptor | ||
347 | * @path: path to write into the sequence buffer. | ||
348 | * | ||
349 | * Write a path name into the sequence buffer. | ||
350 | * | ||
351 | * Returns 1 if we have successfully written all the contents to | ||
352 | * the buffer. | ||
353 | * Returns 0 if the length to write is bigger than the | ||
354 | * reserved buffer space. In this case, nothing gets written. | ||
355 | */ | ||
356 | int trace_seq_path(struct trace_seq *s, const struct path *path) | ||
357 | { | ||
358 | unsigned char *p; | ||
359 | |||
360 | if (s->full) | ||
361 | return 0; | ||
362 | |||
363 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { | ||
364 | s->full = 1; | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
369 | if (!IS_ERR(p)) { | ||
370 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
371 | if (p) { | ||
372 | s->len = p - s->buffer; | ||
373 | return 1; | ||
374 | } | ||
375 | } else { | ||
376 | s->buffer[s->len++] = '?'; | ||
377 | return 1; | ||
378 | } | ||
379 | |||
380 | s->full = 1; | ||
381 | return 0; | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(trace_seq_path); | ||
384 | |||
385 | /** | ||
386 | * trace_seq_to_user - copy the sequence buffer to user space | ||
387 | * @s: trace sequence descriptor | ||
388 | * @ubuf: The userspace memory location to copy to | ||
389 | * @cnt: The amount to copy | ||
390 | * | ||
391 | * Copies the sequence buffer into the userspace memory pointed to | ||
392 | * by @ubuf. It starts from the last read position (@s->readpos) | ||
393 | * and writes up to @cnt characters or till it reaches the end of | ||
394 | * the content in the buffer (@s->len), which ever comes first. | ||
395 | * | ||
396 | * On success, it returns a positive number of the number of bytes | ||
397 | * it copied. | ||
398 | * | ||
399 | * On failure it returns -EBUSY if all of the content in the | ||
400 | * sequence has been already read, which includes nothing in the | ||
401 | * sequence (@s->len == @s->readpos). | ||
402 | * | ||
403 | * Returns -EFAULT if the copy to userspace fails. | ||
404 | */ | ||
405 | int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt) | ||
406 | { | ||
407 | int len; | ||
408 | int ret; | ||
409 | |||
410 | if (!cnt) | ||
411 | return 0; | ||
412 | |||
413 | if (s->len <= s->readpos) | ||
414 | return -EBUSY; | ||
415 | |||
416 | len = s->len - s->readpos; | ||
417 | if (cnt > len) | ||
418 | cnt = len; | ||
419 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | ||
420 | if (ret == cnt) | ||
421 | return -EFAULT; | ||
422 | |||
423 | cnt -= ret; | ||
424 | |||
425 | s->readpos += cnt; | ||
426 | return cnt; | ||
427 | } | ||
428 | EXPORT_SYMBOL_GPL(trace_seq_to_user); | ||
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h index 4b0113f73ee9..476429281389 100644 --- a/samples/trace_events/trace-events-sample.h +++ b/samples/trace_events/trace-events-sample.h | |||
@@ -87,7 +87,7 @@ TRACE_EVENT(foo_bar, | |||
87 | ), | 87 | ), |
88 | 88 | ||
89 | TP_fast_assign( | 89 | TP_fast_assign( |
90 | strncpy(__entry->foo, foo, 10); | 90 | strlcpy(__entry->foo, foo, 10); |
91 | __entry->bar = bar; | 91 | __entry->bar = bar; |
92 | ), | 92 | ), |
93 | 93 | ||