author     Linus Torvalds <torvalds@linux-foundation.org>   2016-01-12 23:04:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-01-12 23:04:15 -0500
commit     c17488d06666153a14dd3f21bd10eba58383f6c1
tree       8a8dfaa2b2692f8b4eb20fe7e4266036f692fbdc
parent     34a9304a96d6351c2d35dcdc9293258378fc0bd8
parent     5156dca34a3e1e1edac2d0dabf43d8632909b7aa
Merge tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"Not much new with tracing for this release. Mostly just clean ups and
minor fixes.
Here's what else is new:
- A new TRACE_EVENT_FN_COND macro, combining both _FN and _COND for
those that want both.
- A new selftest to test trace instance creation and deletion
- Better debug output when ftrace fails"
* tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (24 commits)
ftrace: Fix the race between ftrace and insmod
ftrace: Add infrastructure for delayed enabling of module functions
x86: ftrace: Fix the comments for ftrace_modify_code_direct()
tracing: Fix comment to use tracing_on over tracing_enable
metag: ftrace: Fix the comments for ftrace_modify_code
sh: ftrace: Fix the comments for ftrace_modify_code()
ia64: ftrace: Fix the comments for ftrace_modify_code()
ftrace: Clean up ftrace_module_init() code
ftrace: Join functions ftrace_module_init() and ftrace_init_module()
tracing: Introduce TRACE_EVENT_FN_COND macro
tracing: Use seq_buf_used() in seq_buf_to_user() instead of len
bpf: Constify bpf_verifier_ops structure
ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too
ftrace: Remove use of control list and ops
ftrace: Fix output of enabled_functions for showing tramp
ftrace: Fix a typo in comment
ftrace: Show all tramps registered to a record on ftrace_bug()
ftrace: Add variable ftrace_expected for archs to show expected code
ftrace: Add new type to distinguish what kind of ftrace_bug()
tracing: Update cond flag when enabling or disabling a trigger
...
-rw-r--r--  arch/ia64/kernel/ftrace.c                                     |  12
-rw-r--r--  arch/metag/kernel/ftrace.c                                    |  11
-rw-r--r--  arch/sh/kernel/ftrace.c                                       |  12
-rw-r--r--  arch/x86/kernel/ftrace.c                                      |  21
-rw-r--r--  include/linux/ftrace.h                                        |  56
-rw-r--r--  include/linux/tracepoint.h                                    |   4
-rw-r--r--  include/trace/define_trace.h                                  |   6
-rw-r--r--  include/trace/trace_events.h                                  |   6
-rw-r--r--  kernel/trace/bpf_trace.c                                      |   2
-rw-r--r--  kernel/trace/ftrace.c                                         | 451
-rw-r--r--  kernel/trace/ring_buffer.c                                    |  57
-rw-r--r--  kernel/trace/trace.h                                          |   6
-rw-r--r--  kernel/trace/trace_event_perf.c                               |   2
-rw-r--r--  kernel/trace/trace_events_trigger.c                           |  10
-rw-r--r--  lib/seq_buf.c                                                 |   6
-rw-r--r--  tools/testing/selftests/ftrace/test.d/instances/instance.tc  |  90
16 files changed, 476 insertions, 276 deletions
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index 3b0c2aa07857..cee411e647ca 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -97,13 +97,11 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 * disappear and change, we need to protect against faulting
-	 * as well as code changing. We do this by using the
-	 * probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	if (!do_check)
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
index ed1d685157c2..ac8c039b0318 100644
--- a/arch/metag/kernel/ftrace.c
+++ b/arch/metag/kernel/ftrace.c
@@ -54,12 +54,11 @@ static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 079d70e6d74b..38993e09ef03 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -212,13 +212,11 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 * disappear and change, we need to protect against faulting
-	 * as well as code changing. We do this by using the
-	 * probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 311bcf338f07..29408d6d6626 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -105,14 +105,14 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
+	ftrace_expected = old_code;
+
 	/*
-	 * Note: Due to modules and __init, code can
-	 * disappear and change, we need to protect against faulting
-	 * as well as code changing. We do this by using the
-	 * probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */
@@ -154,6 +154,8 @@ int ftrace_make_nop(struct module *mod,
 	if (addr == MCOUNT_ADDR)
 		return ftrace_modify_code_direct(rec->ip, old, new);
 
+	ftrace_expected = NULL;
+
 	/* Normal cases use add_brk_on_nop */
 	WARN_ONCE(1, "invalid use of ftrace_make_nop");
 	return -EINVAL;
@@ -220,6 +222,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 			 unsigned long addr)
 {
 	WARN_ON(1);
+	ftrace_expected = NULL;
 	return -EINVAL;
 }
 
@@ -314,6 +317,8 @@ static int add_break(unsigned long ip, const char *old)
 	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
+	ftrace_expected = old;
+
 	/* Make sure it is what we expect it to be */
 	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
 		return -EINVAL;
@@ -413,6 +418,8 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 		ftrace_addr = ftrace_get_addr_curr(rec);
 		nop = ftrace_call_replace(ip, ftrace_addr);
 
+		ftrace_expected = nop;
+
 		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 			return -EINVAL;
 	}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 60048c50404e..0639dcc98195 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
- *            could be controled by following calls:
+ * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
+ *            could be controlled by following calls:
  *             ftrace_function_local_enable
  *             ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
@@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
 	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
-	FTRACE_OPS_FL_CONTROL			= 1 << 2,
+	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
 	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
@@ -134,6 +134,7 @@ enum {
 	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
 	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
 	FTRACE_OPS_FL_PID			= 1 << 14,
+	FTRACE_OPS_FL_RCU			= 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -146,11 +147,11 @@ struct ftrace_ops_hash {
 #endif
 
 /*
- * Note, ftrace_ops can be referenced outside of RCU protection.
- * (Although, for perf, the control ops prevent that). If ftrace_ops is
- * allocated and not part of kernel core data, the unregistering of it will
- * perform a scheduling on all CPUs to make sure that there are no more users.
- * Depending on the load of the system that may take a bit of time.
+ * Note, ftrace_ops can be referenced outside of RCU protection, unless
+ * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
+ * core data, the unregistering of it will perform a scheduling on all CPUs
+ * to make sure that there are no more users. Depending on the load of the
+ * system that may take a bit of time.
  *
  * Any private data added must also take care not to be freed and if private
  * data is added to a ftrace_ops that is in core code, the user of the
@@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
 /**
- * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_enable - enable ftrace_ops on current cpu
  *
  * This function enables tracing on current cpu by decreasing
  * the per cpu control variable.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
 {
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
 		return;
 
 	(*this_cpu_ptr(ops->disabled))--;
 }
 
 /**
- * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_disable - disable ftrace_ops on current cpu
  *
- * This function enables tracing on current cpu by decreasing
+ * This function disables tracing on current cpu by increasing
  * the per cpu control variable.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
 {
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
 		return;
 
 	(*this_cpu_ptr(ops->disabled))++;
@@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
  *
  * This function returns value of ftrace_ops::disabled on current cpu.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 {
-	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
 	return *this_cpu_ptr(ops->disabled);
 }
 
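The three helpers above are the whole user-facing API of the renamed flag. As a minimal sketch of how a PER_CPU ops might be driven (my_ops, my_cb and the helper names are illustrative, not code from this series): every CPU's "disabled" counter starts at 1, so the callback stays off on a CPU until it is locally enabled there, and the enable/disable calls must run with preemption off.

	/* Sketch only -- "my_cb"/"my_ops" are hypothetical users of the flag. */
	static void my_cb(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* Runs only on CPUs where my_ops has been locally enabled. */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_cb,
		.flags	= FTRACE_OPS_FL_PER_CPU,	/* was FTRACE_OPS_FL_CONTROL */
	};

	/* Call after register_ftrace_function(&my_ops). */
	static void my_enable_on_this_cpu(void)
	{
		preempt_disable();
		ftrace_function_local_enable(&my_ops);	/* this CPU's counter-- */
		preempt_enable();
	}

	static void my_disable_on_this_cpu(void)
	{
		preempt_disable();
		ftrace_function_local_disable(&my_ops);	/* this CPU's counter++ */
		preempt_enable();
	}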
@@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void);
 
 struct dyn_ftrace;
 
+enum ftrace_bug_type {
+	FTRACE_BUG_UNKNOWN,
+	FTRACE_BUG_INIT,
+	FTRACE_BUG_NOP,
+	FTRACE_BUG_CALL,
+	FTRACE_BUG_UPDATE,
+};
+extern enum ftrace_bug_type ftrace_bug_type;
+
+/*
+ * Archs can set this to point to a variable that holds the value that was
+ * expected at the call site before calling ftrace_bug().
+ */
+extern const void *ftrace_expected;
+
 void ftrace_bug(int err, struct dyn_ftrace *rec);
 
 struct seq_file;
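These declarations back the "better debug output when ftrace fails" item from the pull message: an architecture points ftrace_expected at the bytes it compared against, so ftrace_bug() can print the expected instruction next to the actual one. A hypothetical arch-side sketch, modeled on the x86 hunks earlier in this diff (the function name is illustrative):

	static int my_arch_modify_code(unsigned long ip, const unsigned char *old,
				       const unsigned char *new)
	{
		unsigned char replaced[MCOUNT_INSN_SIZE];

		/* Let ftrace_bug() report what we expected to find at "ip". */
		ftrace_expected = old;

		if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
			return -EFAULT;	/* read fault */
		if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;	/* ftrace_bug() prints actual vs. expected */

		return probe_kernel_write((void *)ip, new, MCOUNT_INSN_SIZE) ?
			-EPERM : 0;	/* write fault */
	}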
@@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr);
  * REGS     - the record wants the function to save regs
  * REGS_EN  - the function is set up to save regs.
  * IPMODIFY - the record allows for the IP address to be changed.
+ * DISABLED - the record is not ready to be touched yet
  *
  * When a new ftrace_ops is registered and wants a function to save
  * pt_regs, the rec->flag REGS is set. When the function has been
@@ -355,10 +372,11 @@ enum {
 	FTRACE_FL_TRAMP		= (1UL << 28),
 	FTRACE_FL_TRAMP_EN	= (1UL << 27),
 	FTRACE_FL_IPMODIFY	= (1UL << 26),
+	FTRACE_FL_DISABLED	= (1UL << 25),
 };
 
-#define FTRACE_REF_MAX_SHIFT	26
-#define FTRACE_FL_BITS		6
+#define FTRACE_REF_MAX_SHIFT	25
+#define FTRACE_FL_BITS		7
 #define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
 #define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
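Plugging the new values into the macros above shows how the dyn_ftrace flags word is split after this change: seven flag bits on top, with the ftrace_rec_count() reference count losing one bit to make room for FTRACE_FL_DISABLED. A compile-time sanity check, as a sketch (not part of the patch):

	static inline void ftrace_flags_layout_check(void)
	{
		/* Bits 31..25: ENABLED, REGS, REGS_EN, TRAMP, TRAMP_EN, IPMODIFY, DISABLED. */
		BUILD_BUG_ON(FTRACE_FL_MASK != 0xfe000000UL);
		/* Bits 24..0 remain for the ftrace_rec_count() reference count. */
		BUILD_BUG_ON(FTRACE_REF_MAX != 0x01ffffffUL);
	}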
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78e8397a1800..acd522a91539 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -479,6 +479,10 @@ extern void syscall_unregfunc(void);
 #define TRACE_EVENT_FN(name, proto, args, struct,		\
 		assign, print, reg, unreg)			\
 	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct,	\
+		assign, print, reg, unreg)			\
+	DECLARE_TRACE_CONDITION(name, PARAMS(proto),		\
+			PARAMS(args), PARAMS(cond))
 #define TRACE_EVENT_CONDITION(name, proto, args, cond,		\
 			      struct, assign, print)		\
 	DECLARE_TRACE_CONDITION(name, PARAMS(proto),		\
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 2d8639ea64d5..6e3945f64102 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -40,6 +40,11 @@
 		assign, print, reg, unreg)			\
 	DEFINE_TRACE_FN(name, reg, unreg)
 
+#undef TRACE_EVENT_FN_COND
+#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
+		assign, print, reg, unreg)			\
+	DEFINE_TRACE_FN(name, reg, unreg)
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
 	DEFINE_TRACE(name)
@@ -93,6 +98,7 @@
 
 #undef TRACE_EVENT
 #undef TRACE_EVENT_FN
+#undef TRACE_EVENT_FN_COND
 #undef TRACE_EVENT_CONDITION
 #undef DECLARE_EVENT_CLASS
 #undef DEFINE_EVENT
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index de996cf61053..170c93bbdbb7 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -123,6 +123,12 @@ TRACE_MAKE_SYSTEM_STR();
 	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
 		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
 
+#undef TRACE_EVENT_FN_COND
+#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
+		assign, print, reg, unreg)			\
+	TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
+
 #undef TRACE_EVENT_FLAGS
 #define TRACE_EVENT_FLAGS(name, value)				\
 	__TRACE_EVENT_FLAGS(name, value)
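Taken together with the tracepoint.h and define_trace.h hunks, the new macro is used like TRACE_EVENT_FN plus a TP_CONDITION(). A hypothetical event definition (my_event, my_reg, my_unreg and the value field are illustrative; only the macro and the TP_* helpers come from the kernel):

	TRACE_EVENT_FN_COND(my_event,

		TP_PROTO(int value),

		TP_ARGS(value),

		TP_CONDITION(value > 0),	/* event fires only when this is true */

		TP_STRUCT__entry(
			__field(int, value)
		),

		TP_fast_assign(
			__entry->value = value;
		),

		TP_printk("value=%d", __entry->value),

		my_reg, my_unreg		/* called when the tracepoint is (un)registered */
	);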
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4228fd3682c3..45dd798bcd37 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -316,7 +316,7 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
 	return true;
 }
 
-static struct bpf_verifier_ops kprobe_prog_ops = {
+static const struct bpf_verifier_ops kprobe_prog_ops = {
 	.get_func_proto	 = kprobe_prog_func_proto,
 	.is_valid_access = kprobe_prog_is_valid_access,
 };
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3f743b147247..eca592f977b2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,8 +62,6 @@ | |||
62 | #define FTRACE_HASH_DEFAULT_BITS 10 | 62 | #define FTRACE_HASH_DEFAULT_BITS 10 |
63 | #define FTRACE_HASH_MAX_BITS 12 | 63 | #define FTRACE_HASH_MAX_BITS 12 |
64 | 64 | ||
65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) | ||
66 | |||
67 | #ifdef CONFIG_DYNAMIC_FTRACE | 65 | #ifdef CONFIG_DYNAMIC_FTRACE |
68 | #define INIT_OPS_HASH(opsname) \ | 66 | #define INIT_OPS_HASH(opsname) \ |
69 | .func_hash = &opsname.local_hash, \ | 67 | .func_hash = &opsname.local_hash, \ |
@@ -113,14 +111,9 @@ static int ftrace_disabled __read_mostly; | |||
113 | 111 | ||
114 | static DEFINE_MUTEX(ftrace_lock); | 112 | static DEFINE_MUTEX(ftrace_lock); |
115 | 113 | ||
116 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; | ||
117 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | 114 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; |
118 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 115 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
119 | static struct ftrace_ops global_ops; | 116 | static struct ftrace_ops global_ops; |
120 | static struct ftrace_ops control_ops; | ||
121 | |||
122 | static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | ||
123 | struct ftrace_ops *op, struct pt_regs *regs); | ||
124 | 117 | ||
125 | #if ARCH_SUPPORTS_FTRACE_OPS | 118 | #if ARCH_SUPPORTS_FTRACE_OPS |
126 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 119 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
@@ -203,7 +196,7 @@ void clear_ftrace_function(void) | |||
203 | ftrace_trace_function = ftrace_stub; | 196 | ftrace_trace_function = ftrace_stub; |
204 | } | 197 | } |
205 | 198 | ||
206 | static void control_ops_disable_all(struct ftrace_ops *ops) | 199 | static void per_cpu_ops_disable_all(struct ftrace_ops *ops) |
207 | { | 200 | { |
208 | int cpu; | 201 | int cpu; |
209 | 202 | ||
@@ -211,16 +204,19 @@ static void control_ops_disable_all(struct ftrace_ops *ops) | |||
211 | *per_cpu_ptr(ops->disabled, cpu) = 1; | 204 | *per_cpu_ptr(ops->disabled, cpu) = 1; |
212 | } | 205 | } |
213 | 206 | ||
214 | static int control_ops_alloc(struct ftrace_ops *ops) | 207 | static int per_cpu_ops_alloc(struct ftrace_ops *ops) |
215 | { | 208 | { |
216 | int __percpu *disabled; | 209 | int __percpu *disabled; |
217 | 210 | ||
211 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) | ||
212 | return -EINVAL; | ||
213 | |||
218 | disabled = alloc_percpu(int); | 214 | disabled = alloc_percpu(int); |
219 | if (!disabled) | 215 | if (!disabled) |
220 | return -ENOMEM; | 216 | return -ENOMEM; |
221 | 217 | ||
222 | ops->disabled = disabled; | 218 | ops->disabled = disabled; |
223 | control_ops_disable_all(ops); | 219 | per_cpu_ops_disable_all(ops); |
224 | return 0; | 220 | return 0; |
225 | } | 221 | } |
226 | 222 | ||
@@ -256,10 +252,11 @@ static inline void update_function_graph_func(void) { } | |||
256 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) | 252 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) |
257 | { | 253 | { |
258 | /* | 254 | /* |
259 | * If this is a dynamic ops or we force list func, | 255 | * If this is a dynamic, RCU, or per CPU ops, or we force list func, |
260 | * then it needs to call the list anyway. | 256 | * then it needs to call the list anyway. |
261 | */ | 257 | */ |
262 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC) | 258 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU | |
259 | FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC) | ||
263 | return ftrace_ops_list_func; | 260 | return ftrace_ops_list_func; |
264 | 261 | ||
265 | return ftrace_ops_get_func(ops); | 262 | return ftrace_ops_get_func(ops); |
@@ -383,26 +380,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | |||
383 | return 0; | 380 | return 0; |
384 | } | 381 | } |
385 | 382 | ||
386 | static void add_ftrace_list_ops(struct ftrace_ops **list, | ||
387 | struct ftrace_ops *main_ops, | ||
388 | struct ftrace_ops *ops) | ||
389 | { | ||
390 | int first = *list == &ftrace_list_end; | ||
391 | add_ftrace_ops(list, ops); | ||
392 | if (first) | ||
393 | add_ftrace_ops(&ftrace_ops_list, main_ops); | ||
394 | } | ||
395 | |||
396 | static int remove_ftrace_list_ops(struct ftrace_ops **list, | ||
397 | struct ftrace_ops *main_ops, | ||
398 | struct ftrace_ops *ops) | ||
399 | { | ||
400 | int ret = remove_ftrace_ops(list, ops); | ||
401 | if (!ret && *list == &ftrace_list_end) | ||
402 | ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); | ||
403 | return ret; | ||
404 | } | ||
405 | |||
406 | static void ftrace_update_trampoline(struct ftrace_ops *ops); | 383 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
407 | 384 | ||
408 | static int __register_ftrace_function(struct ftrace_ops *ops) | 385 | static int __register_ftrace_function(struct ftrace_ops *ops) |
@@ -430,14 +407,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
430 | if (!core_kernel_data((unsigned long)ops)) | 407 | if (!core_kernel_data((unsigned long)ops)) |
431 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; | 408 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
432 | 409 | ||
433 | if (ops->flags & FTRACE_OPS_FL_CONTROL) { | 410 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) { |
434 | if (control_ops_alloc(ops)) | 411 | if (per_cpu_ops_alloc(ops)) |
435 | return -ENOMEM; | 412 | return -ENOMEM; |
436 | add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); | 413 | } |
437 | /* The control_ops needs the trampoline update */ | 414 | |
438 | ops = &control_ops; | 415 | add_ftrace_ops(&ftrace_ops_list, ops); |
439 | } else | ||
440 | add_ftrace_ops(&ftrace_ops_list, ops); | ||
441 | 416 | ||
442 | /* Always save the function, and reset at unregistering */ | 417 | /* Always save the function, and reset at unregistering */ |
443 | ops->saved_func = ops->func; | 418 | ops->saved_func = ops->func; |
@@ -460,11 +435,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
460 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) | 435 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) |
461 | return -EBUSY; | 436 | return -EBUSY; |
462 | 437 | ||
463 | if (ops->flags & FTRACE_OPS_FL_CONTROL) { | 438 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); |
464 | ret = remove_ftrace_list_ops(&ftrace_control_list, | ||
465 | &control_ops, ops); | ||
466 | } else | ||
467 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | ||
468 | 439 | ||
469 | if (ret < 0) | 440 | if (ret < 0) |
470 | return ret; | 441 | return ret; |
@@ -1687,6 +1658,9 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
1687 | int in_hash = 0; | 1658 | int in_hash = 0; |
1688 | int match = 0; | 1659 | int match = 0; |
1689 | 1660 | ||
1661 | if (rec->flags & FTRACE_FL_DISABLED) | ||
1662 | continue; | ||
1663 | |||
1690 | if (all) { | 1664 | if (all) { |
1691 | /* | 1665 | /* |
1692 | * Only the filter_hash affects all records. | 1666 | * Only the filter_hash affects all records. |
@@ -1940,7 +1914,7 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, | |||
1940 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); | 1914 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); |
1941 | } | 1915 | } |
1942 | 1916 | ||
1943 | static void print_ip_ins(const char *fmt, unsigned char *p) | 1917 | static void print_ip_ins(const char *fmt, const unsigned char *p) |
1944 | { | 1918 | { |
1945 | int i; | 1919 | int i; |
1946 | 1920 | ||
@@ -1952,6 +1926,31 @@ static void print_ip_ins(const char *fmt, unsigned char *p) | |||
1952 | 1926 | ||
1953 | static struct ftrace_ops * | 1927 | static struct ftrace_ops * |
1954 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); | 1928 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); |
1929 | static struct ftrace_ops * | ||
1930 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); | ||
1931 | |||
1932 | enum ftrace_bug_type ftrace_bug_type; | ||
1933 | const void *ftrace_expected; | ||
1934 | |||
1935 | static void print_bug_type(void) | ||
1936 | { | ||
1937 | switch (ftrace_bug_type) { | ||
1938 | case FTRACE_BUG_UNKNOWN: | ||
1939 | break; | ||
1940 | case FTRACE_BUG_INIT: | ||
1941 | pr_info("Initializing ftrace call sites\n"); | ||
1942 | break; | ||
1943 | case FTRACE_BUG_NOP: | ||
1944 | pr_info("Setting ftrace call site to NOP\n"); | ||
1945 | break; | ||
1946 | case FTRACE_BUG_CALL: | ||
1947 | pr_info("Setting ftrace call site to call ftrace function\n"); | ||
1948 | break; | ||
1949 | case FTRACE_BUG_UPDATE: | ||
1950 | pr_info("Updating ftrace call site to call a different ftrace function\n"); | ||
1951 | break; | ||
1952 | } | ||
1953 | } | ||
1955 | 1954 | ||
1956 | /** | 1955 | /** |
1957 | * ftrace_bug - report and shutdown function tracer | 1956 | * ftrace_bug - report and shutdown function tracer |
@@ -1979,8 +1978,12 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) | |||
1979 | FTRACE_WARN_ON_ONCE(1); | 1978 | FTRACE_WARN_ON_ONCE(1); |
1980 | pr_info("ftrace failed to modify "); | 1979 | pr_info("ftrace failed to modify "); |
1981 | print_ip_sym(ip); | 1980 | print_ip_sym(ip); |
1982 | print_ip_ins(" actual: ", (unsigned char *)ip); | 1981 | print_ip_ins(" actual: ", (unsigned char *)ip); |
1983 | pr_cont("\n"); | 1982 | pr_cont("\n"); |
1983 | if (ftrace_expected) { | ||
1984 | print_ip_ins(" expected: ", ftrace_expected); | ||
1985 | pr_cont("\n"); | ||
1986 | } | ||
1984 | break; | 1987 | break; |
1985 | case -EPERM: | 1988 | case -EPERM: |
1986 | FTRACE_WARN_ON_ONCE(1); | 1989 | FTRACE_WARN_ON_ONCE(1); |
@@ -1992,6 +1995,7 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) | |||
1992 | pr_info("ftrace faulted on unknown error "); | 1995 | pr_info("ftrace faulted on unknown error "); |
1993 | print_ip_sym(ip); | 1996 | print_ip_sym(ip); |
1994 | } | 1997 | } |
1998 | print_bug_type(); | ||
1995 | if (rec) { | 1999 | if (rec) { |
1996 | struct ftrace_ops *ops = NULL; | 2000 | struct ftrace_ops *ops = NULL; |
1997 | 2001 | ||
@@ -2000,15 +2004,19 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) | |||
2000 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | 2004 | rec->flags & FTRACE_FL_REGS ? " R" : " "); |
2001 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | 2005 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2002 | ops = ftrace_find_tramp_ops_any(rec); | 2006 | ops = ftrace_find_tramp_ops_any(rec); |
2003 | if (ops) | 2007 | if (ops) { |
2004 | pr_cont("\ttramp: %pS", | 2008 | do { |
2005 | (void *)ops->trampoline); | 2009 | pr_cont("\ttramp: %pS (%pS)", |
2006 | else | 2010 | (void *)ops->trampoline, |
2011 | (void *)ops->func); | ||
2012 | ops = ftrace_find_tramp_ops_next(rec, ops); | ||
2013 | } while (ops); | ||
2014 | } else | ||
2007 | pr_cont("\ttramp: ERROR!"); | 2015 | pr_cont("\ttramp: ERROR!"); |
2008 | 2016 | ||
2009 | } | 2017 | } |
2010 | ip = ftrace_get_addr_curr(rec); | 2018 | ip = ftrace_get_addr_curr(rec); |
2011 | pr_cont(" expected tramp: %lx\n", ip); | 2019 | pr_cont("\n expected tramp: %lx\n", ip); |
2012 | } | 2020 | } |
2013 | } | 2021 | } |
2014 | 2022 | ||
@@ -2016,6 +2024,11 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2016 | { | 2024 | { |
2017 | unsigned long flag = 0UL; | 2025 | unsigned long flag = 0UL; |
2018 | 2026 | ||
2027 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; | ||
2028 | |||
2029 | if (rec->flags & FTRACE_FL_DISABLED) | ||
2030 | return FTRACE_UPDATE_IGNORE; | ||
2031 | |||
2019 | /* | 2032 | /* |
2020 | * If we are updating calls: | 2033 | * If we are updating calls: |
2021 | * | 2034 | * |
@@ -2077,9 +2090,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2077 | * from the save regs, to a non-save regs function or | 2090 | * from the save regs, to a non-save regs function or |
2078 | * vice versa, or from a trampoline call. | 2091 | * vice versa, or from a trampoline call. |
2079 | */ | 2092 | */ |
2080 | if (flag & FTRACE_FL_ENABLED) | 2093 | if (flag & FTRACE_FL_ENABLED) { |
2094 | ftrace_bug_type = FTRACE_BUG_CALL; | ||
2081 | return FTRACE_UPDATE_MAKE_CALL; | 2095 | return FTRACE_UPDATE_MAKE_CALL; |
2096 | } | ||
2082 | 2097 | ||
2098 | ftrace_bug_type = FTRACE_BUG_UPDATE; | ||
2083 | return FTRACE_UPDATE_MODIFY_CALL; | 2099 | return FTRACE_UPDATE_MODIFY_CALL; |
2084 | } | 2100 | } |
2085 | 2101 | ||
@@ -2096,6 +2112,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2096 | FTRACE_FL_REGS_EN); | 2112 | FTRACE_FL_REGS_EN); |
2097 | } | 2113 | } |
2098 | 2114 | ||
2115 | ftrace_bug_type = FTRACE_BUG_NOP; | ||
2099 | return FTRACE_UPDATE_MAKE_NOP; | 2116 | return FTRACE_UPDATE_MAKE_NOP; |
2100 | } | 2117 | } |
2101 | 2118 | ||
@@ -2145,6 +2162,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) | |||
2145 | } | 2162 | } |
2146 | 2163 | ||
2147 | static struct ftrace_ops * | 2164 | static struct ftrace_ops * |
2165 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, | ||
2166 | struct ftrace_ops *op) | ||
2167 | { | ||
2168 | unsigned long ip = rec->ip; | ||
2169 | |||
2170 | while_for_each_ftrace_op(op) { | ||
2171 | |||
2172 | if (!op->trampoline) | ||
2173 | continue; | ||
2174 | |||
2175 | if (hash_contains_ip(ip, op->func_hash)) | ||
2176 | return op; | ||
2177 | } | ||
2178 | |||
2179 | return NULL; | ||
2180 | } | ||
2181 | |||
2182 | static struct ftrace_ops * | ||
2148 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) | 2183 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) |
2149 | { | 2184 | { |
2150 | struct ftrace_ops *op; | 2185 | struct ftrace_ops *op; |
@@ -2307,17 +2342,22 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
2307 | 2342 | ||
2308 | ret = ftrace_update_record(rec, enable); | 2343 | ret = ftrace_update_record(rec, enable); |
2309 | 2344 | ||
2345 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; | ||
2346 | |||
2310 | switch (ret) { | 2347 | switch (ret) { |
2311 | case FTRACE_UPDATE_IGNORE: | 2348 | case FTRACE_UPDATE_IGNORE: |
2312 | return 0; | 2349 | return 0; |
2313 | 2350 | ||
2314 | case FTRACE_UPDATE_MAKE_CALL: | 2351 | case FTRACE_UPDATE_MAKE_CALL: |
2352 | ftrace_bug_type = FTRACE_BUG_CALL; | ||
2315 | return ftrace_make_call(rec, ftrace_addr); | 2353 | return ftrace_make_call(rec, ftrace_addr); |
2316 | 2354 | ||
2317 | case FTRACE_UPDATE_MAKE_NOP: | 2355 | case FTRACE_UPDATE_MAKE_NOP: |
2356 | ftrace_bug_type = FTRACE_BUG_NOP; | ||
2318 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); | 2357 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
2319 | 2358 | ||
2320 | case FTRACE_UPDATE_MODIFY_CALL: | 2359 | case FTRACE_UPDATE_MODIFY_CALL: |
2360 | ftrace_bug_type = FTRACE_BUG_UPDATE; | ||
2321 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | 2361 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
2322 | } | 2362 | } |
2323 | 2363 | ||
@@ -2425,6 +2465,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
2425 | 2465 | ||
2426 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 2466 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
2427 | if (ret) { | 2467 | if (ret) { |
2468 | ftrace_bug_type = FTRACE_BUG_INIT; | ||
2428 | ftrace_bug(ret, rec); | 2469 | ftrace_bug(ret, rec); |
2429 | return 0; | 2470 | return 0; |
2430 | } | 2471 | } |
@@ -2566,7 +2607,7 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) | |||
2566 | { | 2607 | { |
2567 | } | 2608 | } |
2568 | 2609 | ||
2569 | static void control_ops_free(struct ftrace_ops *ops) | 2610 | static void per_cpu_ops_free(struct ftrace_ops *ops) |
2570 | { | 2611 | { |
2571 | free_percpu(ops->disabled); | 2612 | free_percpu(ops->disabled); |
2572 | } | 2613 | } |
@@ -2667,13 +2708,13 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2667 | 2708 | ||
2668 | if (!command || !ftrace_enabled) { | 2709 | if (!command || !ftrace_enabled) { |
2669 | /* | 2710 | /* |
2670 | * If these are control ops, they still need their | 2711 | * If these are per_cpu ops, they still need their |
2671 | * per_cpu field freed. Since, function tracing is | 2712 | * per_cpu field freed. Since, function tracing is |
2672 | * not currently active, we can just free them | 2713 | * not currently active, we can just free them |
2673 | * without synchronizing all CPUs. | 2714 | * without synchronizing all CPUs. |
2674 | */ | 2715 | */ |
2675 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | 2716 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) |
2676 | control_ops_free(ops); | 2717 | per_cpu_ops_free(ops); |
2677 | return 0; | 2718 | return 0; |
2678 | } | 2719 | } |
2679 | 2720 | ||
@@ -2714,7 +2755,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2714 | /* | 2755 | /* |
2715 | * Dynamic ops may be freed, we must make sure that all | 2756 | * Dynamic ops may be freed, we must make sure that all |
2716 | * callers are done before leaving this function. | 2757 | * callers are done before leaving this function. |
2717 | * The same goes for freeing the per_cpu data of the control | 2758 | * The same goes for freeing the per_cpu data of the per_cpu |
2718 | * ops. | 2759 | * ops. |
2719 | * | 2760 | * |
2720 | * Again, normal synchronize_sched() is not good enough. | 2761 | * Again, normal synchronize_sched() is not good enough. |
@@ -2725,13 +2766,13 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2725 | * infrastructure to do the synchronization, thus we must do it | 2766 | * infrastructure to do the synchronization, thus we must do it |
2726 | * ourselves. | 2767 | * ourselves. |
2727 | */ | 2768 | */ |
2728 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { | 2769 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) { |
2729 | schedule_on_each_cpu(ftrace_sync); | 2770 | schedule_on_each_cpu(ftrace_sync); |
2730 | 2771 | ||
2731 | arch_ftrace_trampoline_free(ops); | 2772 | arch_ftrace_trampoline_free(ops); |
2732 | 2773 | ||
2733 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | 2774 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) |
2734 | control_ops_free(ops); | 2775 | per_cpu_ops_free(ops); |
2735 | } | 2776 | } |
2736 | 2777 | ||
2737 | return 0; | 2778 | return 0; |
@@ -2798,9 +2839,9 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |||
2798 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | 2839 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
2799 | return 0; | 2840 | return 0; |
2800 | 2841 | ||
2801 | /* If ops traces all mods, we already accounted for it */ | 2842 | /* If ops traces all then it includes this function */ |
2802 | if (ops_traces_mod(ops)) | 2843 | if (ops_traces_mod(ops)) |
2803 | return 0; | 2844 | return 1; |
2804 | 2845 | ||
2805 | /* The function must be in the filter */ | 2846 | /* The function must be in the filter */ |
2806 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && | 2847 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && |
@@ -2814,64 +2855,41 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |||
2814 | return 1; | 2855 | return 1; |
2815 | } | 2856 | } |
2816 | 2857 | ||
2817 | static int referenced_filters(struct dyn_ftrace *rec) | ||
2818 | { | ||
2819 | struct ftrace_ops *ops; | ||
2820 | int cnt = 0; | ||
2821 | |||
2822 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | ||
2823 | if (ops_references_rec(ops, rec)) | ||
2824 | cnt++; | ||
2825 | } | ||
2826 | |||
2827 | return cnt; | ||
2828 | } | ||
2829 | |||
2830 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) | 2858 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
2831 | { | 2859 | { |
2832 | struct ftrace_page *pg; | 2860 | struct ftrace_page *pg; |
2833 | struct dyn_ftrace *p; | 2861 | struct dyn_ftrace *p; |
2834 | cycle_t start, stop; | 2862 | cycle_t start, stop; |
2835 | unsigned long update_cnt = 0; | 2863 | unsigned long update_cnt = 0; |
2836 | unsigned long ref = 0; | 2864 | unsigned long rec_flags = 0; |
2837 | bool test = false; | ||
2838 | int i; | 2865 | int i; |
2839 | 2866 | ||
2867 | start = ftrace_now(raw_smp_processor_id()); | ||
2868 | |||
2840 | /* | 2869 | /* |
2841 | * When adding a module, we need to check if tracers are | 2870 | * When a module is loaded, this function is called to convert |
2842 | * currently enabled and if they are set to trace all functions. | 2871 | * the calls to mcount in its text to nops, and also to create |
2843 | * If they are, we need to enable the module functions as well | 2872 | * an entry in the ftrace data. Now, if ftrace is activated |
2844 | * as update the reference counts for those function records. | 2873 | * after this call, but before the module sets its text to |
2874 | * read-only, the modification of enabling ftrace can fail if | ||
2875 | * the read-only is done while ftrace is converting the calls. | ||
2876 | * To prevent this, the module's records are set as disabled | ||
2877 | * and will be enabled after the call to set the module's text | ||
2878 | * to read-only. | ||
2845 | */ | 2879 | */ |
2846 | if (mod) { | 2880 | if (mod) |
2847 | struct ftrace_ops *ops; | 2881 | rec_flags |= FTRACE_FL_DISABLED; |
2848 | |||
2849 | for (ops = ftrace_ops_list; | ||
2850 | ops != &ftrace_list_end; ops = ops->next) { | ||
2851 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | ||
2852 | if (ops_traces_mod(ops)) | ||
2853 | ref++; | ||
2854 | else | ||
2855 | test = true; | ||
2856 | } | ||
2857 | } | ||
2858 | } | ||
2859 | |||
2860 | start = ftrace_now(raw_smp_processor_id()); | ||
2861 | 2882 | ||
2862 | for (pg = new_pgs; pg; pg = pg->next) { | 2883 | for (pg = new_pgs; pg; pg = pg->next) { |
2863 | 2884 | ||
2864 | for (i = 0; i < pg->index; i++) { | 2885 | for (i = 0; i < pg->index; i++) { |
2865 | int cnt = ref; | ||
2866 | 2886 | ||
2867 | /* If something went wrong, bail without enabling anything */ | 2887 | /* If something went wrong, bail without enabling anything */ |
2868 | if (unlikely(ftrace_disabled)) | 2888 | if (unlikely(ftrace_disabled)) |
2869 | return -1; | 2889 | return -1; |
2870 | 2890 | ||
2871 | p = &pg->records[i]; | 2891 | p = &pg->records[i]; |
2872 | if (test) | 2892 | p->flags = rec_flags; |
2873 | cnt += referenced_filters(p); | ||
2874 | p->flags = cnt; | ||
2875 | 2893 | ||
2876 | /* | 2894 | /* |
2877 | * Do the initial record conversion from mcount jump | 2895 | * Do the initial record conversion from mcount jump |
@@ -2881,21 +2899,6 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) | |||
2881 | break; | 2899 | break; |
2882 | 2900 | ||
2883 | update_cnt++; | 2901 | update_cnt++; |
2884 | |||
2885 | /* | ||
2886 | * If the tracing is enabled, go ahead and enable the record. | ||
2887 | * | ||
2888 | * The reason not to enable the record immediatelly is the | ||
2889 | * inherent check of ftrace_make_nop/ftrace_make_call for | ||
2890 | * correct previous instructions. Making first the NOP | ||
2891 | * conversion puts the module to the correct state, thus | ||
2892 | * passing the ftrace_make_call check. | ||
2893 | */ | ||
2894 | if (ftrace_start_up && cnt) { | ||
2895 | int failed = __ftrace_replace_code(p, 1); | ||
2896 | if (failed) | ||
2897 | ftrace_bug(failed, p); | ||
2898 | } | ||
2899 | } | 2902 | } |
2900 | } | 2903 | } |
2901 | 2904 | ||
@@ -3258,7 +3261,7 @@ static int t_show(struct seq_file *m, void *v) | |||
3258 | 3261 | ||
3259 | seq_printf(m, "%ps", (void *)rec->ip); | 3262 | seq_printf(m, "%ps", (void *)rec->ip); |
3260 | if (iter->flags & FTRACE_ITER_ENABLED) { | 3263 | if (iter->flags & FTRACE_ITER_ENABLED) { |
3261 | struct ftrace_ops *ops = NULL; | 3264 | struct ftrace_ops *ops; |
3262 | 3265 | ||
3263 | seq_printf(m, " (%ld)%s%s", | 3266 | seq_printf(m, " (%ld)%s%s", |
3264 | ftrace_rec_count(rec), | 3267 | ftrace_rec_count(rec), |
@@ -3266,14 +3269,19 @@ static int t_show(struct seq_file *m, void *v) | |||
3266 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); | 3269 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); |
3267 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | 3270 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
3268 | ops = ftrace_find_tramp_ops_any(rec); | 3271 | ops = ftrace_find_tramp_ops_any(rec); |
3269 | if (ops) | 3272 | if (ops) { |
3270 | seq_printf(m, "\ttramp: %pS", | 3273 | do { |
3271 | (void *)ops->trampoline); | 3274 | seq_printf(m, "\ttramp: %pS (%pS)", |
3272 | else | 3275 | (void *)ops->trampoline, |
3276 | (void *)ops->func); | ||
3277 | add_trampoline_func(m, ops, rec); | ||
3278 | ops = ftrace_find_tramp_ops_next(rec, ops); | ||
3279 | } while (ops); | ||
3280 | } else | ||
3273 | seq_puts(m, "\ttramp: ERROR!"); | 3281 | seq_puts(m, "\ttramp: ERROR!"); |
3274 | 3282 | } else { | |
3283 | add_trampoline_func(m, NULL, rec); | ||
3275 | } | 3284 | } |
3276 | add_trampoline_func(m, ops, rec); | ||
3277 | } | 3285 | } |
3278 | 3286 | ||
3279 | seq_putc(m, '\n'); | 3287 | seq_putc(m, '\n'); |
@@ -4898,6 +4906,19 @@ static int ftrace_process_locs(struct module *mod, | |||
4898 | 4906 | ||
4899 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) | 4907 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
4900 | 4908 | ||
4909 | static int referenced_filters(struct dyn_ftrace *rec) | ||
4910 | { | ||
4911 | struct ftrace_ops *ops; | ||
4912 | int cnt = 0; | ||
4913 | |||
4914 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | ||
4915 | if (ops_references_rec(ops, rec)) | ||
4916 | cnt++; | ||
4917 | } | ||
4918 | |||
4919 | return cnt; | ||
4920 | } | ||
4921 | |||
4901 | void ftrace_release_mod(struct module *mod) | 4922 | void ftrace_release_mod(struct module *mod) |
4902 | { | 4923 | { |
4903 | struct dyn_ftrace *rec; | 4924 | struct dyn_ftrace *rec; |
@@ -4940,41 +4961,112 @@ void ftrace_release_mod(struct module *mod) | |||
4940 | mutex_unlock(&ftrace_lock); | 4961 | mutex_unlock(&ftrace_lock); |
4941 | } | 4962 | } |
4942 | 4963 | ||
4943 | static void ftrace_init_module(struct module *mod, | 4964 | static void ftrace_module_enable(struct module *mod) |
4944 | unsigned long *start, unsigned long *end) | ||
4945 | { | 4965 | { |
4946 | if (ftrace_disabled || start == end) | 4966 | struct dyn_ftrace *rec; |
4947 | return; | 4967 | struct ftrace_page *pg; |
4948 | ftrace_process_locs(mod, start, end); | 4968 | |
4969 | mutex_lock(&ftrace_lock); | ||
4970 | |||
4971 | if (ftrace_disabled) | ||
4972 | goto out_unlock; | ||
4973 | |||
4974 | /* | ||
4975 | * If the tracing is enabled, go ahead and enable the record. | ||
4976 | * | ||
4977 | * The reason not to enable the record immediatelly is the | ||
4978 | * inherent check of ftrace_make_nop/ftrace_make_call for | ||
4979 | * correct previous instructions. Making first the NOP | ||
4980 | * conversion puts the module to the correct state, thus | ||
4981 | * passing the ftrace_make_call check. | ||
4982 | * | ||
4983 | * We also delay this to after the module code already set the | ||
4984 | * text to read-only, as we now need to set it back to read-write | ||
4985 | * so that we can modify the text. | ||
4986 | */ | ||
4987 | if (ftrace_start_up) | ||
4988 | ftrace_arch_code_modify_prepare(); | ||
4989 | |||
4990 | do_for_each_ftrace_rec(pg, rec) { | ||
4991 | int cnt; | ||
4992 | /* | ||
4993 | * do_for_each_ftrace_rec() is a double loop. | ||
4994 | * module text shares the pg. If a record is | ||
4995 | * not part of this module, then skip this pg, | ||
4996 | * which the "break" will do. | ||
4997 | */ | ||
4998 | if (!within_module_core(rec->ip, mod)) | ||
4999 | break; | ||
5000 | |||
5001 | cnt = 0; | ||
5002 | |||
5003 | /* | ||
5004 | * When adding a module, we need to check if tracers are | ||
5005 | * currently enabled and if they are, and can trace this record, | ||
5006 | * we need to enable the module functions as well as update the | ||
5007 | * reference counts for those function records. | ||
5008 | */ | ||
5009 | if (ftrace_start_up) | ||
5010 | cnt += referenced_filters(rec); | ||
5011 | |||
5012 | /* This clears FTRACE_FL_DISABLED */ | ||
5013 | rec->flags = cnt; | ||
5014 | |||
5015 | if (ftrace_start_up && cnt) { | ||
5016 | int failed = __ftrace_replace_code(rec, 1); | ||
5017 | if (failed) { | ||
5018 | ftrace_bug(failed, rec); | ||
5019 | goto out_loop; | ||
5020 | } | ||
5021 | } | ||
5022 | |||
5023 | } while_for_each_ftrace_rec(); | ||
5024 | |||
5025 | out_loop: | ||
5026 | if (ftrace_start_up) | ||
5027 | ftrace_arch_code_modify_post_process(); | ||
5028 | |||
5029 | out_unlock: | ||
5030 | mutex_unlock(&ftrace_lock); | ||
4949 | } | 5031 | } |
4950 | 5032 | ||
4951 | void ftrace_module_init(struct module *mod) | 5033 | void ftrace_module_init(struct module *mod) |
4952 | { | 5034 | { |
4953 | ftrace_init_module(mod, mod->ftrace_callsites, | 5035 | if (ftrace_disabled || !mod->num_ftrace_callsites) |
4954 | mod->ftrace_callsites + | 5036 | return; |
4955 | mod->num_ftrace_callsites); | 5037 | |
5038 | ftrace_process_locs(mod, mod->ftrace_callsites, | ||
5039 | mod->ftrace_callsites + mod->num_ftrace_callsites); | ||
4956 | } | 5040 | } |
4957 | 5041 | ||
4958 | static int ftrace_module_notify_exit(struct notifier_block *self, | 5042 | static int ftrace_module_notify(struct notifier_block *self, |
4959 | unsigned long val, void *data) | 5043 | unsigned long val, void *data) |
4960 | { | 5044 | { |
4961 | struct module *mod = data; | 5045 | struct module *mod = data; |
4962 | 5046 | ||
4963 | if (val == MODULE_STATE_GOING) | 5047 | switch (val) { |
5048 | case MODULE_STATE_COMING: | ||
5049 | ftrace_module_enable(mod); | ||
5050 | break; | ||
5051 | case MODULE_STATE_GOING: | ||
4964 | ftrace_release_mod(mod); | 5052 | ftrace_release_mod(mod); |
5053 | break; | ||
5054 | default: | ||
5055 | break; | ||
5056 | } | ||
4965 | 5057 | ||
4966 | return 0; | 5058 | return 0; |
4967 | } | 5059 | } |
4968 | #else | 5060 | #else |
4969 | static int ftrace_module_notify_exit(struct notifier_block *self, | 5061 | static int ftrace_module_notify(struct notifier_block *self, |
4970 | unsigned long val, void *data) | 5062 | unsigned long val, void *data) |
4971 | { | 5063 | { |
4972 | return 0; | 5064 | return 0; |
4973 | } | 5065 | } |
4974 | #endif /* CONFIG_MODULES */ | 5066 | #endif /* CONFIG_MODULES */ |
4975 | 5067 | ||
4976 | struct notifier_block ftrace_module_exit_nb = { | 5068 | struct notifier_block ftrace_module_nb = { |
4977 | .notifier_call = ftrace_module_notify_exit, | 5069 | .notifier_call = ftrace_module_notify, |
4978 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | 5070 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ |
4979 | }; | 5071 | }; |
4980 | 5072 | ||
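The net effect of ftrace_module_enable() and the FTRACE_FL_DISABLED flag is a two-stage module hand-off. Summarized as a sketch (all functions named appear in this diff; the timeline itself is an illustration, not kernel code):

	/*
	 * 1. load_module() -> ftrace_module_init():
	 *      mcount call sites are converted to NOPs and every new record is
	 *      created with FTRACE_FL_DISABLED set, so nothing can patch it yet.
	 *
	 * 2. MODULE_STATE_COMING -> ftrace_module_notify() -> ftrace_module_enable():
	 *      runs once the module text has been made read-only; ftrace flips it
	 *      writable around the update via ftrace_arch_code_modify_prepare()/
	 *      _post_process(), clears FTRACE_FL_DISABLED, sets rec->flags from
	 *      referenced_filters(), and patches records to calls through
	 *      __ftrace_replace_code() if tracing is already started.
	 *
	 * 3. MODULE_STATE_GOING -> ftrace_module_notify() -> ftrace_release_mod():
	 *      the module's records are removed again.
	 */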
@@ -5006,7 +5098,7 @@ void __init ftrace_init(void) | |||
5006 | __start_mcount_loc, | 5098 | __start_mcount_loc, |
5007 | __stop_mcount_loc); | 5099 | __stop_mcount_loc); |
5008 | 5100 | ||
5009 | ret = register_module_notifier(&ftrace_module_exit_nb); | 5101 | ret = register_module_notifier(&ftrace_module_nb); |
5010 | if (ret) | 5102 | if (ret) |
5011 | pr_warning("Failed to register trace ftrace module exit notifier\n"); | 5103 | pr_warning("Failed to register trace ftrace module exit notifier\n"); |
5012 | 5104 | ||
@@ -5116,44 +5208,6 @@ void ftrace_reset_array_ops(struct trace_array *tr) | |||
5116 | tr->ops->func = ftrace_stub; | 5208 | tr->ops->func = ftrace_stub; |
5117 | } | 5209 | } |
5118 | 5210 | ||
5119 | static void | ||
5120 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | ||
5121 | struct ftrace_ops *op, struct pt_regs *regs) | ||
5122 | { | ||
5123 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) | ||
5124 | return; | ||
5125 | |||
5126 | /* | ||
5127 | * Some of the ops may be dynamically allocated, | ||
5128 | * they must be freed after a synchronize_sched(). | ||
5129 | */ | ||
5130 | preempt_disable_notrace(); | ||
5131 | trace_recursion_set(TRACE_CONTROL_BIT); | ||
5132 | |||
5133 | /* | ||
5134 | * Control funcs (perf) uses RCU. Only trace if | ||
5135 | * RCU is currently active. | ||
5136 | */ | ||
5137 | if (!rcu_is_watching()) | ||
5138 | goto out; | ||
5139 | |||
5140 | do_for_each_ftrace_op(op, ftrace_control_list) { | ||
5141 | if (!(op->flags & FTRACE_OPS_FL_STUB) && | ||
5142 | !ftrace_function_local_disabled(op) && | ||
5143 | ftrace_ops_test(op, ip, regs)) | ||
5144 | op->func(ip, parent_ip, op, regs); | ||
5145 | } while_for_each_ftrace_op(op); | ||
5146 | out: | ||
5147 | trace_recursion_clear(TRACE_CONTROL_BIT); | ||
5148 | preempt_enable_notrace(); | ||
5149 | } | ||
5150 | |||
5151 | static struct ftrace_ops control_ops = { | ||
5152 | .func = ftrace_ops_control_func, | ||
5153 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | ||
5154 | INIT_OPS_HASH(control_ops) | ||
5155 | }; | ||
5156 | |||
5157 | static inline void | 5211 | static inline void |
5158 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 5212 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
5159 | struct ftrace_ops *ignored, struct pt_regs *regs) | 5213 | struct ftrace_ops *ignored, struct pt_regs *regs) |
@@ -5170,8 +5224,22 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
5170 | * they must be freed after a synchronize_sched(). | 5224 | * they must be freed after a synchronize_sched(). |
5171 | */ | 5225 | */ |
5172 | preempt_disable_notrace(); | 5226 | preempt_disable_notrace(); |
5227 | |||
5173 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 5228 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
5174 | if (ftrace_ops_test(op, ip, regs)) { | 5229 | /* |
5230 | * Check the following for each ops before calling their func: | ||
5231 | * if RCU flag is set, then rcu_is_watching() must be true | ||
5232 | * if PER_CPU is set, then ftrace_function_local_disable() | ||
5233 | * must be false | ||
5234 | * Otherwise test if the ip matches the ops filter | ||
5235 | * | ||
5236 | * If any of the above fails then the op->func() is not executed. | ||
5237 | */ | ||
5238 | if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && | ||
5239 | (!(op->flags & FTRACE_OPS_FL_PER_CPU) || | ||
5240 | !ftrace_function_local_disabled(op)) && | ||
5241 | ftrace_ops_test(op, ip, regs)) { | ||
5242 | |||
5175 | if (FTRACE_WARN_ON(!op->func)) { | 5243 | if (FTRACE_WARN_ON(!op->func)) { |
5176 | pr_warn("op=%p %pS\n", op, op); | 5244 | pr_warn("op=%p %pS\n", op, op); |
5177 | goto out; | 5245 | goto out; |
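With the control list removed, the list walk above folds the old control-ops checks into one gate per ops: if FTRACE_OPS_FL_RCU is set, RCU must be watching; if FTRACE_OPS_FL_PER_CPU is set, the ops must not be locally disabled; only then is the ip filter consulted. A compilable userspace model of that gate, with the three predicates reduced to stubs (flag names match the diff, everything else is illustrative):

/* Model of the per-ops gate added to __ftrace_ops_list_func(). */
#include <stdbool.h>
#include <stdio.h>

#define FTRACE_OPS_FL_RCU	(1 << 0)
#define FTRACE_OPS_FL_PER_CPU	(1 << 1)

struct ftrace_ops { unsigned long flags; };

static bool rcu_is_watching(void)                                     { return true; }
static bool ftrace_function_local_disabled(struct ftrace_ops *op)     { return false; }
static bool ftrace_ops_test(struct ftrace_ops *op, unsigned long ip)  { return true; }

/* Returns true when op->func() may be called for this ip. */
static bool ops_may_run(struct ftrace_ops *op, unsigned long ip)
{
	return (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
	       (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
		!ftrace_function_local_disabled(op)) &&
	       ftrace_ops_test(op, ip);
}

int main(void)
{
	struct ftrace_ops op = { .flags = FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU };

	printf("may run: %d\n", ops_may_run(&op, 0));
	return 0;
}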
@@ -5195,7 +5263,7 @@ out: | |||
5195 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. | 5263 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
5196 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. | 5264 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. |
5197 | * An architecture can pass partial regs with ftrace_ops and still | 5265 | * An architecture can pass partial regs with ftrace_ops and still |
5198 | * set the ARCH_SUPPORT_FTARCE_OPS. | 5266 | * set the ARCH_SUPPORTS_FTRACE_OPS. |
5199 | */ | 5267 | */ |
5200 | #if ARCH_SUPPORTS_FTRACE_OPS | 5268 | #if ARCH_SUPPORTS_FTRACE_OPS |
5201 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 5269 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
@@ -5212,20 +5280,29 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | |||
5212 | 5280 | ||
5213 | /* | 5281 | /* |
5214 | * If there's only one function registered but it does not support | 5282 | * If there's only one function registered but it does not support |
5215 | * recursion, this function will be called by the mcount trampoline. | 5283 | * recursion, needs RCU protection and/or requires per cpu handling, then |
5216 | * This function will handle recursion protection. | 5284 | * this function will be called by the mcount trampoline. |
5217 | */ | 5285 | */ |
5218 | static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | 5286 | static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, |
5219 | struct ftrace_ops *op, struct pt_regs *regs) | 5287 | struct ftrace_ops *op, struct pt_regs *regs) |
5220 | { | 5288 | { |
5221 | int bit; | 5289 | int bit; |
5222 | 5290 | ||
5291 | if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) | ||
5292 | return; | ||
5293 | |||
5223 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | 5294 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
5224 | if (bit < 0) | 5295 | if (bit < 0) |
5225 | return; | 5296 | return; |
5226 | 5297 | ||
5227 | op->func(ip, parent_ip, op, regs); | 5298 | preempt_disable_notrace(); |
5228 | 5299 | ||
5300 | if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || | ||
5301 | !ftrace_function_local_disabled(op)) { | ||
5302 | op->func(ip, parent_ip, op, regs); | ||
5303 | } | ||
5304 | |||
5305 | preempt_enable_notrace(); | ||
5229 | trace_clear_recursion(bit); | 5306 | trace_clear_recursion(bit); |
5230 | } | 5307 | } |
5231 | 5308 | ||
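The renamed ftrace_ops_assist_func() layers its checks in a fixed order: return early if the ops needs RCU and RCU is not watching, take the recursion bit, disable preemption, call the handler only if the per-CPU disable allows it, then unwind in reverse. A sketch of that ordering with the kernel primitives replaced by plain flags (purely illustrative, not the real locking):

/* Model of the assist wrapper's check ordering. */
#include <stdbool.h>
#include <stdio.h>

#define FTRACE_OPS_FL_RCU	(1 << 0)
#define FTRACE_OPS_FL_PER_CPU	(1 << 1)

struct ftrace_ops {
	unsigned long flags;
	void (*func)(unsigned long ip, struct ftrace_ops *op);
};

static bool rcu_is_watching(void)                    { return true; }
static bool per_cpu_disabled(struct ftrace_ops *op)  { (void)op; return false; }

static int recursion;		/* stands in for trace_test_and_set_recursion() */
static int preempt_count;	/* stands in for preempt_disable/enable_notrace() */

static void assist_func(unsigned long ip, struct ftrace_ops *op)
{
	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return;				/* RCU-dependent ops: skip while RCU is idle */

	if (recursion)
		return;				/* already inside the tracer: bail out */
	recursion = 1;

	preempt_count++;			/* ops may be freed only after a grace period */
	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || !per_cpu_disabled(op))
		op->func(ip, op);
	preempt_count--;

	recursion = 0;
}

static void handler(unsigned long ip, struct ftrace_ops *op)
{
	(void)op;
	printf("traced ip=%#lx\n", ip);
}

int main(void)
{
	struct ftrace_ops op = { .flags = FTRACE_OPS_FL_PER_CPU, .func = handler };

	assist_func(0x1234, &op);
	return 0;
}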
@@ -5243,12 +5320,12 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | |||
5243 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) | 5320 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) |
5244 | { | 5321 | { |
5245 | /* | 5322 | /* |
5246 | * If the func handles its own recursion, call it directly. | 5323 | * If the function does not handle recursion, needs to be RCU safe, |
5247 | * Otherwise call the recursion protected function that | 5324 | * or does per cpu logic, then we need to call the assist handler. |
5248 | * will call the ftrace ops function. | ||
5249 | */ | 5325 | */ |
5250 | if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE)) | 5326 | if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || |
5251 | return ftrace_ops_recurs_func; | 5327 | ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU)) |
5328 | return ftrace_ops_assist_func; | ||
5252 | 5329 | ||
5253 | return ops->func; | 5330 | return ops->func; |
5254 | } | 5331 | } |
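ftrace_ops_get_func() now routes an ops through the assist wrapper whenever it is not recursion safe or it carries the RCU or PER_CPU flags; otherwise its own handler is used directly. The selection reduces to one expression, modeled below with placeholder callbacks (only the flag names come from the diff):

/* Model of the callback selection in ftrace_ops_get_func(). */
#include <stdio.h>

#define FTRACE_OPS_FL_RECURSION_SAFE	(1 << 0)
#define FTRACE_OPS_FL_RCU		(1 << 1)
#define FTRACE_OPS_FL_PER_CPU		(1 << 2)

typedef void (*ftrace_func_t)(void);

static void ftrace_ops_assist_func(void) { puts("assist wrapper"); }
static void plain_handler(void)          { puts("direct handler"); }

struct ftrace_ops { unsigned long flags; ftrace_func_t func; };

static ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
		return ftrace_ops_assist_func;	/* needs help from the wrapper */

	return ops->func;			/* safe to call directly */
}

int main(void)
{
	struct ftrace_ops ops = { .flags = FTRACE_OPS_FL_RECURSION_SAFE, .func = plain_handler };

	ftrace_ops_get_func(&ops)();		/* prints "direct handler" */
	ops.flags |= FTRACE_OPS_FL_RCU;
	ftrace_ops_get_func(&ops)();		/* prints "assist wrapper" */
	return 0;
}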
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9c6045a27ba3..95181e36891a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1001,17 +1001,13 @@ static int rb_head_page_replace(struct buffer_page *old, | |||
1001 | 1001 | ||
1002 | /* | 1002 | /* |
1003 | * rb_tail_page_update - move the tail page forward | 1003 | * rb_tail_page_update - move the tail page forward |
1004 | * | ||
1005 | * Returns 1 if moved tail page, 0 if someone else did. | ||
1006 | */ | 1004 | */ |
1007 | static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, | 1005 | static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, |
1008 | struct buffer_page *tail_page, | 1006 | struct buffer_page *tail_page, |
1009 | struct buffer_page *next_page) | 1007 | struct buffer_page *next_page) |
1010 | { | 1008 | { |
1011 | struct buffer_page *old_tail; | ||
1012 | unsigned long old_entries; | 1009 | unsigned long old_entries; |
1013 | unsigned long old_write; | 1010 | unsigned long old_write; |
1014 | int ret = 0; | ||
1015 | 1011 | ||
1016 | /* | 1012 | /* |
1017 | * The tail page now needs to be moved forward. | 1013 | * The tail page now needs to be moved forward. |
@@ -1036,7 +1032,7 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, | |||
1036 | * it is, then it is up to us to update the tail | 1032 | * it is, then it is up to us to update the tail |
1037 | * pointer. | 1033 | * pointer. |
1038 | */ | 1034 | */ |
1039 | if (tail_page == cpu_buffer->tail_page) { | 1035 | if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { |
1040 | /* Zero the write counter */ | 1036 | /* Zero the write counter */ |
1041 | unsigned long val = old_write & ~RB_WRITE_MASK; | 1037 | unsigned long val = old_write & ~RB_WRITE_MASK; |
1042 | unsigned long eval = old_entries & ~RB_WRITE_MASK; | 1038 | unsigned long eval = old_entries & ~RB_WRITE_MASK; |
@@ -1061,14 +1057,9 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, | |||
1061 | */ | 1057 | */ |
1062 | local_set(&next_page->page->commit, 0); | 1058 | local_set(&next_page->page->commit, 0); |
1063 | 1059 | ||
1064 | old_tail = cmpxchg(&cpu_buffer->tail_page, | 1060 | /* Again, either we update tail_page or an interrupt does */ |
1065 | tail_page, next_page); | 1061 | (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); |
1066 | |||
1067 | if (old_tail == tail_page) | ||
1068 | ret = 1; | ||
1069 | } | 1062 | } |
1070 | |||
1071 | return ret; | ||
1072 | } | 1063 | } |
1073 | 1064 | ||
1074 | static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, | 1065 | static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, |
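rb_tail_page_update() no longer reports whether its cmpxchg won: either this writer advanced the tail page or a nested writer (an interrupt) did, and in both cases the page has moved, so the result is discarded. A userspace model of that fire-and-forget compare-and-swap using C11 atomics (the structures are placeholders):

/* Model of "either we update tail_page or an interrupt does". */
#include <stdatomic.h>
#include <stdio.h>

struct buffer_page { int id; };

static _Atomic(struct buffer_page *) tail_page;

static void tail_page_update(struct buffer_page *old, struct buffer_page *next)
{
	struct buffer_page *expected = old;

	/* Ignore the result: if it failed, someone else already moved the tail. */
	(void)atomic_compare_exchange_strong(&tail_page, &expected, next);
}

int main(void)
{
	struct buffer_page a = { 0 }, b = { 1 };

	atomic_store(&tail_page, &a);
	tail_page_update(&a, &b);	/* this caller wins ...             */
	tail_page_update(&a, &b);	/* ... a racing caller simply loses */
	printf("tail is page %d\n", atomic_load(&tail_page)->id);
	return 0;
}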
@@ -2036,12 +2027,15 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, | |||
2036 | * the tail page would have moved. | 2027 | * the tail page would have moved. |
2037 | */ | 2028 | */ |
2038 | if (ret == RB_PAGE_NORMAL) { | 2029 | if (ret == RB_PAGE_NORMAL) { |
2030 | struct buffer_page *buffer_tail_page; | ||
2031 | |||
2032 | buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); | ||
2039 | /* | 2033 | /* |
2040 | * If the tail had moved passed next, then we need | 2034 | * If the tail had moved passed next, then we need |
2041 | * to reset the pointer. | 2035 | * to reset the pointer. |
2042 | */ | 2036 | */ |
2043 | if (cpu_buffer->tail_page != tail_page && | 2037 | if (buffer_tail_page != tail_page && |
2044 | cpu_buffer->tail_page != next_page) | 2038 | buffer_tail_page != next_page) |
2045 | rb_head_page_set_normal(cpu_buffer, new_head, | 2039 | rb_head_page_set_normal(cpu_buffer, new_head, |
2046 | next_page, | 2040 | next_page, |
2047 | RB_PAGE_HEAD); | 2041 | RB_PAGE_HEAD); |
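rb_handle_head_page() now snapshots cpu_buffer->tail_page once with READ_ONCE() into a local before comparing it against two candidates, so the compiler cannot re-read the shared pointer between the two tests and observe two different values. A minimal sketch of that snapshot idiom, with READ_ONCE() approximated by a volatile load (GCC's typeof extension assumed):

/* Model of snapshotting a shared pointer once before multiple comparisons. */
#include <stdio.h>

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

struct buffer_page { int id; };

static struct buffer_page pages[3];
static struct buffer_page *tail_page = &pages[0];	/* shared, may change concurrently */

static int tail_moved_past(struct buffer_page *a, struct buffer_page *b)
{
	struct buffer_page *snap = READ_ONCE(tail_page);	/* one load, reused below */

	return snap != a && snap != b;
}

int main(void)
{
	printf("moved past: %d\n", tail_moved_past(&pages[0], &pages[1]));
	return 0;
}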
@@ -2135,6 +2129,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
2135 | local_sub(length, &tail_page->write); | 2129 | local_sub(length, &tail_page->write); |
2136 | } | 2130 | } |
2137 | 2131 | ||
2132 | static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); | ||
2133 | |||
2138 | /* | 2134 | /* |
2139 | * This is the slow path, force gcc not to inline it. | 2135 | * This is the slow path, force gcc not to inline it. |
2140 | */ | 2136 | */ |
@@ -2147,7 +2143,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
2147 | struct ring_buffer *buffer = cpu_buffer->buffer; | 2143 | struct ring_buffer *buffer = cpu_buffer->buffer; |
2148 | struct buffer_page *next_page; | 2144 | struct buffer_page *next_page; |
2149 | int ret; | 2145 | int ret; |
2150 | u64 ts; | ||
2151 | 2146 | ||
2152 | next_page = tail_page; | 2147 | next_page = tail_page; |
2153 | 2148 | ||
@@ -2221,20 +2216,17 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
2221 | } | 2216 | } |
2222 | } | 2217 | } |
2223 | 2218 | ||
2224 | ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); | 2219 | rb_tail_page_update(cpu_buffer, tail_page, next_page); |
2225 | if (ret) { | ||
2226 | /* | ||
2227 | * Nested commits always have zero deltas, so | ||
2228 | * just reread the time stamp | ||
2229 | */ | ||
2230 | ts = rb_time_stamp(buffer); | ||
2231 | next_page->page->time_stamp = ts; | ||
2232 | } | ||
2233 | 2220 | ||
2234 | out_again: | 2221 | out_again: |
2235 | 2222 | ||
2236 | rb_reset_tail(cpu_buffer, tail, info); | 2223 | rb_reset_tail(cpu_buffer, tail, info); |
2237 | 2224 | ||
2225 | /* Commit what we have for now. */ | ||
2226 | rb_end_commit(cpu_buffer); | ||
2227 | /* rb_end_commit() decs committing */ | ||
2228 | local_inc(&cpu_buffer->committing); | ||
2229 | |||
2238 | /* fail and let the caller try again */ | 2230 | /* fail and let the caller try again */ |
2239 | return ERR_PTR(-EAGAIN); | 2231 | return ERR_PTR(-EAGAIN); |
2240 | 2232 | ||
@@ -2362,7 +2354,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, | |||
2362 | addr = (unsigned long)event; | 2354 | addr = (unsigned long)event; |
2363 | addr &= PAGE_MASK; | 2355 | addr &= PAGE_MASK; |
2364 | 2356 | ||
2365 | bpage = cpu_buffer->tail_page; | 2357 | bpage = READ_ONCE(cpu_buffer->tail_page); |
2366 | 2358 | ||
2367 | if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { | 2359 | if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { |
2368 | unsigned long write_mask = | 2360 | unsigned long write_mask = |
@@ -2410,7 +2402,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
2410 | again: | 2402 | again: |
2411 | max_count = cpu_buffer->nr_pages * 100; | 2403 | max_count = cpu_buffer->nr_pages * 100; |
2412 | 2404 | ||
2413 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 2405 | while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { |
2414 | if (RB_WARN_ON(cpu_buffer, !(--max_count))) | 2406 | if (RB_WARN_ON(cpu_buffer, !(--max_count))) |
2415 | return; | 2407 | return; |
2416 | if (RB_WARN_ON(cpu_buffer, | 2408 | if (RB_WARN_ON(cpu_buffer, |
@@ -2419,8 +2411,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
2419 | local_set(&cpu_buffer->commit_page->page->commit, | 2411 | local_set(&cpu_buffer->commit_page->page->commit, |
2420 | rb_page_write(cpu_buffer->commit_page)); | 2412 | rb_page_write(cpu_buffer->commit_page)); |
2421 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 2413 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
2422 | cpu_buffer->write_stamp = | 2414 | /* Only update the write stamp if the page has an event */ |
2423 | cpu_buffer->commit_page->page->time_stamp; | 2415 | if (rb_page_write(cpu_buffer->commit_page)) |
2416 | cpu_buffer->write_stamp = | ||
2417 | cpu_buffer->commit_page->page->time_stamp; | ||
2424 | /* add barrier to keep gcc from optimizing too much */ | 2418 | /* add barrier to keep gcc from optimizing too much */ |
2425 | barrier(); | 2419 | barrier(); |
2426 | } | 2420 | } |
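When the commit page is advanced, write_stamp is now copied from the new page only if that page actually holds an event (rb_page_write() is non-zero), presumably because an untouched page's time_stamp is not meaningful yet. A tiny model of that guard (field layout simplified for illustration):

/* Model of "only update the write stamp if the page has an event". */
#include <stdio.h>

struct buffer_data_page { unsigned long long time_stamp; };
struct buffer_page { unsigned long write; struct buffer_data_page *page; };

static unsigned long long write_stamp;

static void advance_commit_page(struct buffer_page *commit_page)
{
	/* Skip empty pages so a stale time_stamp cannot move write_stamp. */
	if (commit_page->write)
		write_stamp = commit_page->page->time_stamp;
}

int main(void)
{
	struct buffer_data_page d = { .time_stamp = 100 };
	struct buffer_page empty = { .write = 0, .page = &d };
	struct buffer_page full  = { .write = 8, .page = &d };

	advance_commit_page(&empty);	/* write_stamp stays 0 */
	advance_commit_page(&full);	/* write_stamp becomes 100 */
	printf("write_stamp = %llu\n", write_stamp);
	return 0;
}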
@@ -2443,7 +2437,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
2443 | * and pushed the tail page forward, we will be left with | 2437 | * and pushed the tail page forward, we will be left with |
2444 | * a dangling commit that will never go forward. | 2438 | * a dangling commit that will never go forward. |
2445 | */ | 2439 | */ |
2446 | if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) | 2440 | if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) |
2447 | goto again; | 2441 | goto again; |
2448 | } | 2442 | } |
2449 | 2443 | ||
@@ -2699,7 +2693,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
2699 | if (unlikely(info->add_timestamp)) | 2693 | if (unlikely(info->add_timestamp)) |
2700 | info->length += RB_LEN_TIME_EXTEND; | 2694 | info->length += RB_LEN_TIME_EXTEND; |
2701 | 2695 | ||
2702 | tail_page = info->tail_page = cpu_buffer->tail_page; | 2696 | /* Don't let the compiler play games with cpu_buffer->tail_page */ |
2697 | tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); | ||
2703 | write = local_add_return(info->length, &tail_page->write); | 2698 | write = local_add_return(info->length, &tail_page->write); |
2704 | 2699 | ||
2705 | /* set write to only the index of the write */ | 2700 | /* set write to only the index of the write */ |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 919d9d07686f..8414fa40bf27 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -363,8 +363,8 @@ struct trace_option_dentry { | |||
363 | * @name: the name chosen to select it on the available_tracers file | 363 | * @name: the name chosen to select it on the available_tracers file |
364 | * @init: called when one switches to this tracer (echo name > current_tracer) | 364 | * @init: called when one switches to this tracer (echo name > current_tracer) |
365 | * @reset: called when one switches to another tracer | 365 | * @reset: called when one switches to another tracer |
366 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) | 366 | * @start: called when tracing is unpaused (echo 1 > tracing_on) |
367 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | 367 | * @stop: called when tracing is paused (echo 0 > tracing_on) |
368 | * @update_thresh: called when tracing_thresh is updated | 368 | * @update_thresh: called when tracing_thresh is updated |
369 | * @open: called when the trace file is opened | 369 | * @open: called when the trace file is opened |
370 | * @pipe_open: called when the trace_pipe file is opened | 370 | * @pipe_open: called when the trace_pipe file is opened |
@@ -467,8 +467,6 @@ enum { | |||
467 | TRACE_INTERNAL_IRQ_BIT, | 467 | TRACE_INTERNAL_IRQ_BIT, |
468 | TRACE_INTERNAL_SIRQ_BIT, | 468 | TRACE_INTERNAL_SIRQ_BIT, |
469 | 469 | ||
470 | TRACE_CONTROL_BIT, | ||
471 | |||
472 | TRACE_BRANCH_BIT, | 470 | TRACE_BRANCH_BIT, |
473 | /* | 471 | /* |
474 | * Abuse of the trace_recursion. | 472 | * Abuse of the trace_recursion. |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index cc9f7a9319be..00df25fd86ef 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -334,7 +334,7 @@ static int perf_ftrace_function_register(struct perf_event *event) | |||
334 | { | 334 | { |
335 | struct ftrace_ops *ops = &event->ftrace_ops; | 335 | struct ftrace_ops *ops = &event->ftrace_ops; |
336 | 336 | ||
337 | ops->flags |= FTRACE_OPS_FL_CONTROL; | 337 | ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU; |
338 | ops->func = perf_ftrace_function_call; | 338 | ops->func = perf_ftrace_function_call; |
339 | return register_ftrace_function(ops); | 339 | return register_ftrace_function(ops); |
340 | } | 340 | } |
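With the control list gone, perf's ftrace_ops simply sets FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU before registering, and the generic list walker and assist wrapper now supply the per-CPU and RCU handling the control path used to provide. A toy registration showing that flag setup (register_ftrace_function() and the handler are stubs):

/* Model of a client setting the new flags before registering its ops. */
#include <stdio.h>

#define FTRACE_OPS_FL_RCU	(1 << 0)
#define FTRACE_OPS_FL_PER_CPU	(1 << 1)

struct ftrace_ops {
	unsigned long flags;
	void (*func)(unsigned long ip);
};

static int register_ftrace_function(struct ftrace_ops *ops)
{
	printf("registered ops with flags %#lx\n", ops->flags);
	return 0;
}

static void perf_like_handler(unsigned long ip) { (void)ip; }

int main(void)
{
	struct ftrace_ops ops = { .func = perf_like_handler };

	/* Replace the old FL_CONTROL flag with the two capabilities it implied. */
	ops.flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	return register_ftrace_function(&ops);
}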
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 4b5e8ed68d77..b38f617b6181 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
@@ -538,11 +538,12 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops, | |||
538 | list_add_rcu(&data->list, &file->triggers); | 538 | list_add_rcu(&data->list, &file->triggers); |
539 | ret++; | 539 | ret++; |
540 | 540 | ||
541 | update_cond_flag(file); | ||
541 | if (trace_event_trigger_enable_disable(file, 1) < 0) { | 542 | if (trace_event_trigger_enable_disable(file, 1) < 0) { |
542 | list_del_rcu(&data->list); | 543 | list_del_rcu(&data->list); |
544 | update_cond_flag(file); | ||
543 | ret--; | 545 | ret--; |
544 | } | 546 | } |
545 | update_cond_flag(file); | ||
546 | out: | 547 | out: |
547 | return ret; | 548 | return ret; |
548 | } | 549 | } |
@@ -570,8 +571,8 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops, | |||
570 | if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) { | 571 | if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) { |
571 | unregistered = true; | 572 | unregistered = true; |
572 | list_del_rcu(&data->list); | 573 | list_del_rcu(&data->list); |
573 | update_cond_flag(file); | ||
574 | trace_event_trigger_enable_disable(file, 0); | 574 | trace_event_trigger_enable_disable(file, 0); |
575 | update_cond_flag(file); | ||
575 | break; | 576 | break; |
576 | } | 577 | } |
577 | } | 578 | } |
@@ -1314,11 +1315,12 @@ static int event_enable_register_trigger(char *glob, | |||
1314 | list_add_rcu(&data->list, &file->triggers); | 1315 | list_add_rcu(&data->list, &file->triggers); |
1315 | ret++; | 1316 | ret++; |
1316 | 1317 | ||
1318 | update_cond_flag(file); | ||
1317 | if (trace_event_trigger_enable_disable(file, 1) < 0) { | 1319 | if (trace_event_trigger_enable_disable(file, 1) < 0) { |
1318 | list_del_rcu(&data->list); | 1320 | list_del_rcu(&data->list); |
1321 | update_cond_flag(file); | ||
1319 | ret--; | 1322 | ret--; |
1320 | } | 1323 | } |
1321 | update_cond_flag(file); | ||
1322 | out: | 1324 | out: |
1323 | return ret; | 1325 | return ret; |
1324 | } | 1326 | } |
@@ -1339,8 +1341,8 @@ static void event_enable_unregister_trigger(char *glob, | |||
1339 | (enable_data->file == test_enable_data->file)) { | 1341 | (enable_data->file == test_enable_data->file)) { |
1340 | unregistered = true; | 1342 | unregistered = true; |
1341 | list_del_rcu(&data->list); | 1343 | list_del_rcu(&data->list); |
1342 | update_cond_flag(file); | ||
1343 | trace_event_trigger_enable_disable(file, 0); | 1344 | trace_event_trigger_enable_disable(file, 0); |
1345 | update_cond_flag(file); | ||
1344 | break; | 1346 | break; |
1345 | } | 1347 | } |
1346 | } | 1348 | } |
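The trigger hunks all adjust the same ordering: update_cond_flag() is called before the event is enabled (and again if enabling fails and the trigger is unlisted), and only after the event is disabled on removal, so the condition flag tracks the trigger list at the moment the event can fire. A compact model of the register/unregister ordering (the file structure and list are reduced to a counter for the sketch):

/* Model of the update_cond_flag() ordering around enable/disable. */
#include <stdbool.h>
#include <stdio.h>

struct event_file {
	bool cond_flag;		/* "some attached trigger has a condition" */
	bool enabled;
	int  triggers;		/* stand-in for the trigger list */
};

static void update_cond_flag(struct event_file *f)        { f->cond_flag = f->triggers > 0; }
static int  enable_disable(struct event_file *f, bool on) { f->enabled = on; return 0; }

static int register_trigger(struct event_file *f)
{
	f->triggers++;
	update_cond_flag(f);			/* flag is correct before the event can fire */
	if (enable_disable(f, true) < 0) {
		f->triggers--;
		update_cond_flag(f);		/* roll back on failure */
		return -1;
	}
	return 0;
}

static void unregister_trigger(struct event_file *f)
{
	f->triggers--;
	enable_disable(f, false);		/* stop the event first ...          */
	update_cond_flag(f);			/* ... then recompute the condition flag */
}

int main(void)
{
	struct event_file f = { 0 };

	register_trigger(&f);
	printf("enabled=%d cond=%d\n", f.enabled, f.cond_flag);
	unregister_trigger(&f);
	printf("enabled=%d cond=%d\n", f.enabled, f.cond_flag);
	return 0;
}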
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 5c94e1012a91..cb18469e1f49 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
@@ -306,10 +306,12 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) | |||
306 | if (!cnt) | 306 | if (!cnt) |
307 | return 0; | 307 | return 0; |
308 | 308 | ||
309 | if (s->len <= s->readpos) | 309 | len = seq_buf_used(s); |
310 | |||
311 | if (len <= s->readpos) | ||
310 | return -EBUSY; | 312 | return -EBUSY; |
311 | 313 | ||
312 | len = seq_buf_used(s) - s->readpos; | 314 | len -= s->readpos; |
313 | if (cnt > len) | 315 | if (cnt > len) |
314 | cnt = len; | 316 | cnt = len; |
315 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | 317 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); |
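seq_buf_to_user() now bases both the -EBUSY check and the copy length on seq_buf_used() rather than on s->len, which can exceed the buffer size after an overflowed write. A userspace model of the length math (copy_to_user replaced by memcpy, structure fields simplified):

/* Model of the seq_buf_to_user() length handling after the change. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct seq_buf {
	char  *buffer;
	size_t size;
	size_t len;		/* may exceed size if a write overflowed */
	size_t readpos;
};

static size_t seq_buf_used(struct seq_buf *s)
{
	return s->len < s->size ? s->len : s->size;	/* clamp to the buffer size */
}

static int seq_buf_to_user(struct seq_buf *s, char *ubuf, int cnt)
{
	size_t len;

	if (!cnt)
		return 0;

	len = seq_buf_used(s);			/* never larger than the buffer */
	if (len <= s->readpos)
		return -EBUSY;

	len -= s->readpos;
	if ((size_t)cnt > len)
		cnt = len;
	memcpy(ubuf, s->buffer + s->readpos, cnt);	/* stand-in for copy_to_user() */
	s->readpos += cnt;
	return cnt;
}

int main(void)
{
	char data[8] = "ABCDEFGH", out[16] = { 0 };
	struct seq_buf s = { .buffer = data, .size = 8, .len = 20, .readpos = 0 };

	printf("copied %d bytes: %.8s\n", seq_buf_to_user(&s, out, sizeof(out)), out);
	return 0;
}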
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc new file mode 100644 index 000000000000..773e276ff90b --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc | |||
@@ -0,0 +1,90 @@ | |||
1 | #!/bin/sh | ||
2 | # description: Test creation and deletion of trace instances | ||
3 | |||
4 | if [ ! -d instances ] ; then | ||
5 | echo "no instance directory with this kernel" | ||
6 | exit_unsupported; | ||
7 | fi | ||
8 | |||
9 | fail() { # mesg | ||
10 | rmdir x y z 2>/dev/null | ||
11 | echo $1 | ||
12 | set -e | ||
13 | exit $FAIL | ||
14 | } | ||
15 | |||
16 | cd instances | ||
17 | |||
18 | # we don't want to fail on error | ||
19 | set +e | ||
20 | |||
21 | mkdir x | ||
22 | rmdir x | ||
23 | result=$? | ||
24 | |||
25 | if [ $result -ne 0 ]; then | ||
26 | echo "instance rmdir not supported" | ||
27 | exit_unsupported | ||
28 | fi | ||
29 | |||
30 | instance_slam() { | ||
31 | while :; do | ||
32 | mkdir x | ||
33 | mkdir y | ||
34 | mkdir z | ||
35 | rmdir x | ||
36 | rmdir y | ||
37 | rmdir z | ||
38 | done 2>/dev/null | ||
39 | } | ||
40 | |||
41 | instance_slam & | ||
42 | x=`jobs -l` | ||
43 | p1=`echo $x | cut -d' ' -f2` | ||
44 | echo $p1 | ||
45 | |||
46 | instance_slam & | ||
47 | x=`jobs -l | tail -1` | ||
48 | p2=`echo $x | cut -d' ' -f2` | ||
49 | echo $p2 | ||
50 | |||
51 | instance_slam & | ||
52 | x=`jobs -l | tail -1` | ||
53 | p3=`echo $x | cut -d' ' -f2` | ||
54 | echo $p3 | ||
55 | |||
56 | instance_slam & | ||
57 | x=`jobs -l | tail -1` | ||
58 | p4=`echo $x | cut -d' ' -f2` | ||
59 | echo $p4 | ||
60 | |||
61 | instance_slam & | ||
62 | x=`jobs -l | tail -1` | ||
63 | p5=`echo $x | cut -d' ' -f2` | ||
64 | echo $p5 | ||
65 | |||
66 | ls -lR >/dev/null | ||
67 | sleep 1 | ||
68 | |||
69 | kill -1 $p1 | ||
70 | kill -1 $p2 | ||
71 | kill -1 $p3 | ||
72 | kill -1 $p4 | ||
73 | kill -1 $p5 | ||
74 | |||
75 | echo "Wait for processes to finish" | ||
76 | wait $p1 $p2 $p3 $p4 $p5 | ||
77 | echo "all processes finished, wait for cleanup" | ||
78 | |||
79 | mkdir x y z | ||
80 | ls x y z | ||
81 | rmdir x y z | ||
82 | for d in x y z; do | ||
83 | if [ -d $d ]; then | ||
84 | fail "instance $d still exists" | ||
85 | fi | ||
86 | done | ||
87 | |||
88 | set -e | ||
89 | |||
90 | exit 0 | ||
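The new instance.tc selftest hammers concurrent creation and deletion of trace instances from several background shells, waits for them to finish, and then checks that x, y and z can still be created and removed cleanly. Like the other .tc cases it is presumably driven by the ftracetest harness in tools/testing/selftests/ftrace rather than run directly; exit_unsupported, for instance, is expected to be provided by that harness.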