| author | Peter Zijlstra <peterz@infradead.org> | 2017-10-11 03:45:32 -0400 |
| --- | --- | --- |
| committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2017-10-16 18:13:38 -0400 |
| commit | b3a88803ac5b4bda26017b485c8722a8487fefb7 (patch) | |
| tree | 542d5da25fe48163a8c000ca9a26f4defddfa1fa | |
| parent | 1dd311e6dcda4020c603bcf9f390a577d439d509 (diff) | |
ftrace: Kill FTRACE_OPS_FL_PER_CPU
The one and only user of FTRACE_OPS_FL_PER_CPU is gone, remove the
lot.
Link: http://lkml.kernel.org/r/20171011080224.372422809@infradead.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--  include/linux/ftrace.h  83
-rw-r--r--  kernel/trace/ftrace.c   55

2 files changed, 20 insertions, 118 deletions
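For context on what is being deleted: an ftrace_ops registered with FTRACE_OPS_FL_PER_CPU carried a per-cpu `->disabled` counter that callers toggled with the `ftrace_function_local_*()` helpers removed below. A minimal sketch of such a caller, reconstructed from the deleted code (`my_ops`, `my_tracer_func`, and `my_mute_this_cpu` are hypothetical names, not from this commit):

```c
#include <linux/ftrace.h>
#include <linux/preempt.h>

/* Hypothetical callback; the signature matches ftrace_func_t. */
static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* ... tracing work ... */
}

/* Registering with FTRACE_OPS_FL_PER_CPU made __register_ftrace_function()
 * allocate the per-cpu ->disabled counters (see per_cpu_ops_alloc() below). */
static struct ftrace_ops my_ops = {
	.func	= my_tracer_func,
	.flags	= FTRACE_OPS_FL_PER_CPU,
};

static void my_mute_this_cpu(void)
{
	/* The helper uses this_cpu_ptr(), so preemption must be off. */
	preempt_disable();
	ftrace_function_local_disable(&my_ops);	/* (*this_cpu_ptr(ops->disabled))++ */
	preempt_enable();
}
```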
```diff
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1f8545caa691..252e334e7b5f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -102,10 +102,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- *            could be controlled by following calls:
- *             ftrace_function_local_enable
- *             ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *            and passed to the callback. If this flag is set, but the
  *            architecture does not support passing regs
@@ -149,21 +145,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
 	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
-	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
-	FTRACE_OPS_FL_STUB			= 1 << 6,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
-	FTRACE_OPS_FL_DELETED			= 1 << 8,
-	FTRACE_OPS_FL_ADDING			= 1 << 9,
-	FTRACE_OPS_FL_REMOVING			= 1 << 10,
-	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
-	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
-	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
-	FTRACE_OPS_FL_PID			= 1 << 14,
-	FTRACE_OPS_FL_RCU			= 1 << 15,
-	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 16,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 2,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 3,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 4,
+	FTRACE_OPS_FL_STUB			= 1 << 5,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 6,
+	FTRACE_OPS_FL_DELETED			= 1 << 7,
+	FTRACE_OPS_FL_ADDING			= 1 << 8,
+	FTRACE_OPS_FL_REMOVING			= 1 << 9,
+	FTRACE_OPS_FL_MODIFYING			= 1 << 10,
+	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 11,
+	FTRACE_OPS_FL_IPMODIFY			= 1 << 12,
+	FTRACE_OPS_FL_PID			= 1 << 13,
+	FTRACE_OPS_FL_RCU			= 1 << 14,
+	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -198,7 +193,6 @@ struct ftrace_ops {
 	unsigned long			flags;
 	void				*private;
 	ftrace_func_t			saved_func;
-	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_ops_hash		local_hash;
 	struct ftrace_ops_hash		*func_hash;
@@ -230,55 +224,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
-{
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-		return;
-
-	(*this_cpu_ptr(ops->disabled))--;
-}
-
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
- *
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
-{
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-		return;
-
-	(*this_cpu_ptr(ops->disabled))++;
-}
-
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- * on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
-{
-	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
-	return *this_cpu_ptr(ops->disabled);
-}
-
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
 			struct ftrace_ops *op, struct pt_regs *regs);
 
```
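Note that dropping bit 2 renumbers every later flag in the enum hunk above. That is safe only because these flags are referenced strictly by name; a hedged illustration (hypothetical helper, not part of this commit):

```c
/* Behaves identically before and after the renumbering because the
 * flag names move with their values; code that had hard-coded
 * 1 << 3 for SAVE_REGS would silently break instead. */
static bool ops_wants_regs(struct ftrace_ops *ops)
{
	return ops->flags & (FTRACE_OPS_FL_SAVE_REGS |
			     FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED);
}
```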
```diff
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e0a98225666b..2fd3edaec6de 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -203,30 +203,6 @@ void clear_ftrace_function(void)
 	ftrace_trace_function = ftrace_stub;
 }
 
-static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(ops->disabled, cpu) = 1;
-}
-
-static int per_cpu_ops_alloc(struct ftrace_ops *ops)
-{
-	int __percpu *disabled;
-
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-		return -EINVAL;
-
-	disabled = alloc_percpu(int);
-	if (!disabled)
-		return -ENOMEM;
-
-	ops->disabled = disabled;
-	per_cpu_ops_disable_all(ops);
-	return 0;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
 	/*
@@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
 	 * then it needs to call the list anyway.
 	 */
-	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
-			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
+	    FTRACE_FORCE_LIST_FUNC)
 		return ftrace_ops_list_func;
 
 	return ftrace_ops_get_func(ops);
@@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
-		if (per_cpu_ops_alloc(ops))
-			return -ENOMEM;
-	}
-
 	add_ftrace_ops(&ftrace_ops_list, ops);
 
 	/* Always save the function, and reset at unregistering */
@@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 {
 }
 
-static void per_cpu_ops_free(struct ftrace_ops *ops)
-{
-	free_percpu(ops->disabled);
-}
-
 static void ftrace_startup_enable(int command)
 {
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * not currently active, we can just free them
 	 * without synchronizing all CPUs.
 	 */
-	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
 		goto free_ops;
 
 	return 0;
@@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * The same goes for freeing the per_cpu data of the per_cpu
 	 * ops.
 	 */
-	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
 		/*
 		 * We need to do a hard force of sched synchronization.
 		 * This is because we use preempt_disable() to do RCU, but
@@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 free_ops:
 		arch_ftrace_trampoline_free(ops);
-
-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-			per_cpu_ops_free(ops);
 	}
 
 	return 0;
@@ -6355,10 +6318,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 * If any of the above fails then the op->func() is not executed.
 	 */
 	if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
-	    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-	     !ftrace_function_local_disabled(op)) &&
 	    ftrace_ops_test(op, ip, regs)) {
-
 		if (FTRACE_WARN_ON(!op->func)) {
 			pr_warn("op=%p %pS\n", op, op);
 			goto out;
@@ -6416,10 +6376,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 
 	preempt_disable_notrace();
 
-	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-	    !ftrace_function_local_disabled(op)) {
-		op->func(ip, parent_ip, op, regs);
-	}
+	op->func(ip, parent_ip, op, regs);
 
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
@@ -6443,7 +6400,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 	 * or does per cpu logic, then we need to call the assist handler.
 	 */
 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
-	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
+	    ops->flags & FTRACE_OPS_FL_RCU)
 		return ftrace_ops_assist_func;
 
 	return ops->func;
```
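With the flag gone, any future user wanting per-cpu gating has to do it inside its own callback. That stays cheap because, as the ftrace_ops_assist_func() hunk above shows, op->func() is already invoked under preempt_disable_notrace(). A minimal sketch under that assumption (all names hypothetical, not part of this commit):

```c
/* Private per-cpu gate, replacing the removed ops->disabled counter. */
static DEFINE_PER_CPU(int, my_tracer_disabled);

static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Preemption is off here (see preempt_disable_notrace() in
	 * ftrace_ops_assist_func() above), so this_cpu_read() is stable. */
	if (this_cpu_read(my_tracer_disabled))
		return;

	/* ... tracing work ... */
}
```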