author		Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:43 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:33:19 -0400
commit		b0fc494fae96a7089f3651cb451f461c7291244c (patch)
tree		586412b9ecbd8e3f04e8cd90a31644f94ff3a7d8 /kernel
parent		3d0833953e1b98b79ddf491dd49229eef9baeac1 (diff)
ftrace: add ftrace_enabled sysctl to disable mcount function
This patch adds back the sysctl ftrace_enabled. This time it defaults to on if DYNAMIC_FTRACE is configured. When ftrace_enabled is disabled, the ftrace function is set to the stub return. If DYNAMIC_FTRACE is also configured, then on ftrace_enabled = 0 the registered ftrace call sites will all be set to jmps, but no new calls into the ftrace recording code (used to find the ftrace call sites) will be made.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
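The new entry lives in kern_table, so it is exposed as /proc/sys/kernel/ftrace_enabled. As a minimal sketch of exercising it from userspace (the test program below is hypothetical and not part of this patch), one can flip tracing off around a workload and back on afterwards:

/* Hypothetical test program, not part of this patch: toggles the new
 * kernel.ftrace_enabled sysctl through its procfs path (needs root). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_ftrace_enabled(const char *val)
{
	int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	write_ftrace_enabled("0");	/* stop: ftrace_trace_function -> ftrace_stub */
	/* ... run the workload that should not be traced ... */
	write_ftrace_enabled("1");	/* start: call sites re-enabled if DYNAMIC_FTRACE */
	return 0;
}

Writes go through ftrace_enable_sysctl(), which compares against last_ftrace_enabled and only acts on an actual 0<->1 transition.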
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sysctl.c		11
-rw-r--r--	kernel/trace/ftrace.c	125
2 files changed, 118 insertions(+), 18 deletions(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 29116652dca8..efaf7c5500e9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -46,6 +46,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/acpi.h>
 #include <linux/reboot.h>
+#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -455,6 +456,16 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#ifdef CONFIG_FTRACE
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "ftrace_enabled",
+		.data		= &ftrace_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &ftrace_enable_sysctl,
+	},
+#endif
 #ifdef CONFIG_KMOD
 	{
 		.ctl_name	= KERN_MODPROBE,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d1ae2ba25274..d3de37299ba4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -20,12 +20,24 @@
 #include <linux/hardirq.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/hash.h>
 #include <linux/list.h>
 
 #include "trace.h"
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ENABLED_INIT 1
+#else
+# define FTRACE_ENABLED_INIT 0
+#endif
+
+int ftrace_enabled = FTRACE_ENABLED_INIT;
+static int last_ftrace_enabled = FTRACE_ENABLED_INIT;
+
 static DEFINE_SPINLOCK(ftrace_lock);
+static DEFINE_MUTEX(ftrace_sysctl_lock);
+
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
 	.func = ftrace_stub,
@@ -78,14 +90,16 @@ static int notrace __register_ftrace_function(struct ftrace_ops *ops)
 	smp_wmb();
 	ftrace_list = ops;
 
-	/*
-	 * For one func, simply call it directly.
-	 * For more than one func, call the chain.
-	 */
-	if (ops->next == &ftrace_list_end)
-		ftrace_trace_function = ops->func;
-	else
-		ftrace_trace_function = ftrace_list_func;
+	if (ftrace_enabled) {
+		/*
+		 * For one func, simply call it directly.
+		 * For more than one func, call the chain.
+		 */
+		if (ops->next == &ftrace_list_end)
+			ftrace_trace_function = ops->func;
+		else
+			ftrace_trace_function = ftrace_list_func;
+	}
 
 	spin_unlock(&ftrace_lock);
 
@@ -120,10 +134,12 @@ static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	*p = (*p)->next;
 
-	/* If we only have one func left, then call that directly */
-	if (ftrace_list == &ftrace_list_end ||
-	    ftrace_list->next == &ftrace_list_end)
-		ftrace_trace_function = ftrace_list->func;
+	if (ftrace_enabled) {
+		/* If we only have one func left, then call that directly */
+		if (ftrace_list == &ftrace_list_end ||
+		    ftrace_list->next == &ftrace_list_end)
+			ftrace_trace_function = ftrace_list->func;
+	}
 
  out:
 	spin_unlock(&ftrace_lock);
@@ -263,7 +279,8 @@ static void notrace ftrace_startup(void)
 		goto out;
 	__unregister_ftrace_function(&ftrace_shutdown_ops);
 
-	ftrace_run_startup_code();
+	if (ftrace_enabled)
+		ftrace_run_startup_code();
  out:
 	mutex_unlock(&ftraced_lock);
 }
@@ -275,13 +292,32 @@ static void notrace ftrace_shutdown(void)
 	if (ftraced_suspend)
 		goto out;
 
-	ftrace_run_shutdown_code();
+	if (ftrace_enabled)
+		ftrace_run_shutdown_code();
 
 	__register_ftrace_function(&ftrace_shutdown_ops);
  out:
 	mutex_unlock(&ftraced_lock);
 }
 
+static void notrace ftrace_startup_sysctl(void)
+{
+	mutex_lock(&ftraced_lock);
+	/* ftraced_suspend is true if we want ftrace running */
+	if (ftraced_suspend)
+		ftrace_run_startup_code();
+	mutex_unlock(&ftraced_lock);
+}
+
+static void notrace ftrace_shutdown_sysctl(void)
+{
+	mutex_lock(&ftraced_lock);
+	/* ftraced_suspend is true if ftrace is running */
+	if (ftraced_suspend)
+		ftrace_run_shutdown_code();
+	mutex_unlock(&ftraced_lock);
+}
+
 static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
@@ -341,8 +377,9 @@ static int notrace ftraced(void *ignore)
 		/* check once a second */
 		schedule_timeout(HZ);
 
+		mutex_lock(&ftrace_sysctl_lock);
 		mutex_lock(&ftraced_lock);
-		if (ftraced_trigger && !ftraced_suspend) {
+		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
 			ftrace_record_suspend++;
 			ftrace_update_code();
 			usecs = nsecs_to_usecs(ftrace_update_time);
@@ -360,6 +397,7 @@ static int notrace ftraced(void *ignore)
 			ftrace_record_suspend--;
 		}
 		mutex_unlock(&ftraced_lock);
+		mutex_unlock(&ftrace_sysctl_lock);
 
 		ftrace_shutdown_replenish();
 
@@ -389,8 +427,10 @@ static int __init notrace ftrace_shutdown_init(void)
 
 core_initcall(ftrace_shutdown_init);
 #else
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
+# define ftrace_startup_sysctl()	do { } while (0)
+# define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
@@ -406,9 +446,15 @@ core_initcall(ftrace_shutdown_init);
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
+	int ret;
+
+	mutex_lock(&ftrace_sysctl_lock);
 	ftrace_startup();
 
-	return __register_ftrace_function(ops);
+	ret = __register_ftrace_function(ops);
+	mutex_unlock(&ftrace_sysctl_lock);
+
+	return ret;
 }
 
 /**
@@ -421,10 +467,53 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
 
+	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
 
 	if (ftrace_list == &ftrace_list_end)
 		ftrace_shutdown();
 
+	mutex_unlock(&ftrace_sysctl_lock);
+
+	return ret;
+}
+
+notrace int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+		     struct file *filp, void __user *buffer, size_t *lenp,
+		     loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+
+	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+		goto out;
+
+	last_ftrace_enabled = ftrace_enabled;
+
+	if (ftrace_enabled) {
+
+		ftrace_startup_sysctl();
+
+		/* we are starting ftrace again */
+		if (ftrace_list != &ftrace_list_end) {
+			if (ftrace_list->next == &ftrace_list_end)
+				ftrace_trace_function = ftrace_list->func;
+			else
+				ftrace_trace_function = ftrace_list_func;
+		}
+
+	} else {
+		/* stopping ftrace calls (just send to ftrace_stub) */
+		ftrace_trace_function = ftrace_stub;
+
+		ftrace_shutdown_sysctl();
+	}
+
+ out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
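To show how the sysctl interacts with the registration path above, here is a rough sketch of a tracer hooking into ftrace. In this tree the real callers of register_ftrace_function() are the in-kernel tracers under kernel/trace/, so the module boilerplate and the two-argument callback signature below are assumptions for illustration, not part of this patch. While ftrace_enabled is 0, __register_ftrace_function() still links the ops into ftrace_list but leaves ftrace_trace_function at ftrace_stub, so the callback only starts firing once the sysctl is switched back on.

/* Hypothetical example tracer (not part of this patch), assuming the
 * two-argument ftrace callback signature of this kernel generation. */
#include <linux/ftrace.h>
#include <linux/module.h>

static unsigned long my_hits;

/* Called for every traced function only while ftrace_enabled is set;
 * otherwise ftrace_trace_function stays at ftrace_stub. */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	my_hits++;
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* Takes ftrace_sysctl_lock internally, so it serializes against
	 * writes to /proc/sys/kernel/ftrace_enabled. */
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");

Note that register_ftrace_function(), unregister_ftrace_function() and the sysctl handler all take ftrace_sysctl_lock, which is what keeps a concurrent write to ftrace_enabled from racing with the update of ftrace_trace_function.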