author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 23:04:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 23:04:15 -0500
commit     c17488d06666153a14dd3f21bd10eba58383f6c1 (patch)
tree       8a8dfaa2b2692f8b4eb20fe7e4266036f692fbdc /include/linux/ftrace.h
parent     34a9304a96d6351c2d35dcdc9293258378fc0bd8 (diff)
parent     5156dca34a3e1e1edac2d0dabf43d8632909b7aa (diff)
Merge tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"Not much new with tracing for this release. Mostly just clean ups and
minor fixes.
Here's what else is new:
- A new TRACE_EVENT_FN_COND macro, combining both _FN and _COND for
those that want both.
- New selftest to test the instance create and delete
- Better debug output when ftrace fails"
* tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (24 commits)
ftrace: Fix the race between ftrace and insmod
ftrace: Add infrastructure for delayed enabling of module functions
x86: ftrace: Fix the comments for ftrace_modify_code_direct()
tracing: Fix comment to use tracing_on over tracing_enable
metag: ftrace: Fix the comments for ftrace_modify_code
sh: ftrace: Fix the comments for ftrace_modify_code()
ia64: ftrace: Fix the comments for ftrace_modify_code()
ftrace: Clean up ftrace_module_init() code
ftrace: Join functions ftrace_module_init() and ftrace_init_module()
tracing: Introduce TRACE_EVENT_FN_COND macro
tracing: Use seq_buf_used() in seq_buf_to_user() instead of len
bpf: Constify bpf_verifier_ops structure
ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too
ftrace: Remove use of control list and ops
ftrace: Fix output of enabled_functions for showing tramp
ftrace: Fix a typo in comment
ftrace: Show all tramps registered to a record on ftrace_bug()
ftrace: Add variable ftrace_expected for archs to show expected code
ftrace: Add new type to distinguish what kind of ftrace_bug()
tracing: Update cond flag when enabling or disabling a trigger
...
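The new TRACE_EVENT_FN_COND macro mentioned in the log above combines TRACE_EVENT_CONDITION's condition argument with TRACE_EVENT_FN's register/unregister callbacks. Below is a minimal sketch of how such an event might be declared, assuming that combined argument order; everything named "sample" is hypothetical and the usual trace-header boilerplate is trimmed.

/*
 * Hypothetical trace header fragment; TRACE_SYSTEM, the event, its field
 * and the callback names are illustrative only.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#include <linux/tracepoint.h>

/* register/unregister callbacks, run when the event is switched on/off;
 * assumed to follow the tracepoint regfunc/unregfunc conventions */
void sample_reg(void);
void sample_unreg(void);

TRACE_EVENT_FN_COND(sample_event,

	TP_PROTO(int value),

	TP_ARGS(value),

	/* the _COND part: the event fires only when this evaluates true */
	TP_CONDITION(value != 0),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value),

	/* the _FN part: per-event register/unregister callbacks */
	sample_reg, sample_unreg
);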
Diffstat (limited to 'include/linux/ftrace.h')
-rw-r--r--  include/linux/ftrace.h  56
1 file changed, 37 insertions(+), 19 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 60048c50404e..0639dcc98195 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
- *           could be controled by following calls:
+ * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
+ *           could be controlled by following calls:
  *            ftrace_function_local_enable
  *            ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
@@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
 	FTRACE_OPS_FL_ENABLED = 1 << 0,
 	FTRACE_OPS_FL_DYNAMIC = 1 << 1,
-	FTRACE_OPS_FL_CONTROL = 1 << 2,
+	FTRACE_OPS_FL_PER_CPU = 1 << 2,
 	FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
 	FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
@@ -134,6 +134,7 @@ enum {
 	FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
 	FTRACE_OPS_FL_IPMODIFY = 1 << 13,
 	FTRACE_OPS_FL_PID = 1 << 14,
+	FTRACE_OPS_FL_RCU = 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -146,11 +147,11 @@ struct ftrace_ops_hash {
 #endif
 
 /*
- * Note, ftrace_ops can be referenced outside of RCU protection.
- * (Although, for perf, the control ops prevent that). If ftrace_ops is
- * allocated and not part of kernel core data, the unregistering of it will
- * perform a scheduling on all CPUs to make sure that there are no more users.
- * Depending on the load of the system that may take a bit of time.
+ * Note, ftrace_ops can be referenced outside of RCU protection, unless
+ * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
+ * core data, the unregistering of it will perform a scheduling on all CPUs
+ * to make sure that there are no more users. Depending on the load of the
+ * system that may take a bit of time.
  *
  * Any private data added must also take care not to be freed and if private
  * data is added to a ftrace_ops that is in core code, the user of the
@@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
 /**
- * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_enable - enable ftrace_ops on current cpu
  *
  * This function enables tracing on current cpu by decreasing
  * the per cpu control variable.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
 {
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
 		return;
 
 	(*this_cpu_ptr(ops->disabled))--;
 }
 
 /**
- * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
+ * ftrace_function_local_disable - disable ftrace_ops on current cpu
  *
- * This function enables tracing on current cpu by decreasing
+ * This function disables tracing on current cpu by increasing
  * the per cpu control variable.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
 {
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
 		return;
 
 	(*this_cpu_ptr(ops->disabled))++;
@@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
  *
  * This function returns value of ftrace_ops::disabled on current cpu.
  * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
  * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  */
 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 {
-	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
 	return *this_cpu_ptr(ops->disabled);
 }
 
@@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void);
 
 struct dyn_ftrace;
 
+enum ftrace_bug_type {
+	FTRACE_BUG_UNKNOWN,
+	FTRACE_BUG_INIT,
+	FTRACE_BUG_NOP,
+	FTRACE_BUG_CALL,
+	FTRACE_BUG_UPDATE,
+};
+extern enum ftrace_bug_type ftrace_bug_type;
+
+/*
+ * Archs can set this to point to a variable that holds the value that was
+ * expected at the call site before calling ftrace_bug().
+ */
+extern const void *ftrace_expected;
+
 void ftrace_bug(int err, struct dyn_ftrace *rec);
 
 struct seq_file;
@@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr);
  * REGS    - the record wants the function to save regs
  * REGS_EN - the function is set up to save regs.
  * IPMODIFY - the record allows for the IP address to be changed.
+ * DISABLED - the record is not ready to be touched yet
  *
  * When a new ftrace_ops is registered and wants a function to save
  * pt_regs, the rec->flag REGS is set. When the function has been
@@ -355,10 +372,11 @@ enum {
 	FTRACE_FL_TRAMP = (1UL << 28),
 	FTRACE_FL_TRAMP_EN = (1UL << 27),
 	FTRACE_FL_IPMODIFY = (1UL << 26),
+	FTRACE_FL_DISABLED = (1UL << 25),
 };
 
-#define FTRACE_REF_MAX_SHIFT	26
-#define FTRACE_FL_BITS		6
+#define FTRACE_REF_MAX_SHIFT	25
+#define FTRACE_FL_BITS		7
 #define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
 #define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
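For context, a minimal sketch (not part of this patch) of how a per-CPU ftrace_ops might be toggled on the local CPU once FTRACE_OPS_FL_PER_CPU replaces FTRACE_OPS_FL_CONTROL. The callback and ops names are illustrative, error handling is omitted, and it assumes registration sets up the per-cpu ops->disabled counter for PER_CPU ops.

#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Illustrative callback; matches the v4.5 ftrace_func_t signature. */
static void sample_trace_func(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs for every traced function while the ops is enabled */
}

static struct ftrace_ops sample_ops = {
	.func	= sample_trace_func,
	.flags	= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RECURSION_SAFE,
};

/*
 * After register_ftrace_function(&sample_ops), tracing can be switched
 * on or off for the local CPU with the helpers shown in the hunks above.
 */
static void sample_toggle_local_cpu(bool enable)
{
	preempt_disable();	/* this_cpu_ptr() requires preemption off */
	if (enable)
		ftrace_function_local_enable(&sample_ops);
	else
		ftrace_function_local_disable(&sample_ops);
	preempt_enable();
}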
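Likewise, a hedged sketch of the arch-side pattern that the new ftrace_expected hook enables: point it at the bytes the arch expected to find before returning an error, so ftrace_bug() can print both the expected and the actual code. The helper and constant below are illustrative, not the actual x86 implementation.

#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define SAMPLE_INSN_SIZE 5	/* stand-in for an arch's MCOUNT_INSN_SIZE */

/*
 * Illustrative arch-style helper: verify the instruction currently at the
 * call site before patching it.  Setting ftrace_expected to old_code lets
 * ftrace_bug() report what was expected when the caller passes back the
 * returned error.
 */
static int sample_modify_code(unsigned long ip, const unsigned char *old_code,
			      const unsigned char *new_code)
{
	unsigned char replaced[SAMPLE_INSN_SIZE];

	ftrace_expected = old_code;

	if (probe_kernel_read(replaced, (void *)ip, SAMPLE_INSN_SIZE))
		return -EFAULT;		/* call site is not readable */

	if (memcmp(replaced, old_code, SAMPLE_INSN_SIZE) != 0)
		return -EINVAL;		/* caller passes this to ftrace_bug() */

	/* ... patch in new_code at ip here ... */
	return 0;
}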