author     Steven Rostedt <rostedt@goodmis.org>   2008-05-27 20:48:37 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-06-02 06:50:04 -0400
commit     ad90c0e3ce8d20d6873b57e36181ef6d7a0097fe
tree       34af559928f9f2403c72156b672578d28f790a4f
parent     76094a2cf46e4ab776055d4086615b884408568c
ftrace: user update and disable dynamic ftrace daemon
In dynamic ftrace, the mcount function starts off pointing to a stub
function that just returns.
On startup, the call to the stub is modified to point to a "record_ip"
function. The job of the record_ip function is to add the function to
a pre-allocated hash list. If the function is already there, it is simply
ignored; otherwise it is added to the list.
Later, an ftraced daemon wakes up and, if any functions have been recorded,
calls kstop_machine and changes the calls to the recorded functions into
simple nops. If no functions were recorded, the daemon goes back to sleep.
The daemon wakes up once a second to see if it needs to update any newly
recorded functions into nops. Usually it does not, but if a lot of code
has been executed for the first time in the kernel, the ftraced daemon
will call kstop_machine to convert those calls into nops.
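In pseudo-form, the daemon loop boils down to something like the sketch
below. This is a heavily simplified illustration, not the real ftraced()
(the actual function, partly visible in the diff below, also takes
ftrace_sysctl_lock/ftraced_lock, checks the suspend/stop flags under those
locks, and does overflow accounting):

/* Simplified sketch of the ftraced daemon loop -- not the real code. */
static int ftraced_sketch(void *ignore)
{
        while (!kthread_should_stop()) {
                /* only patch if new call sites were recorded and allowed */
                if (ftraced_trigger && !ftraced_suspend && !ftraced_stop)
                        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

                /* sleep for roughly one second and check again */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
        return 0;
}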
The problem is that there is currently no way to stop the daemon from doing
this, and its kstop_machine calls can cause unneeded latencies (around 800us,
which some users find bothersome).
This patch adds a new file, /debugfs/tracing/ftraced_enabled. Reading it
returns "enabled\n" when the daemon is active and "disabled\n" when it is
not. To disable the daemon, echo "0" or "disable" into this file; echo "1"
or "enable" to re-enable it.
Since the daemon exists to convert the recorded functions into nops and
thereby improve system performance, I also made any write to the
ftraced_enabled file run kstop_machine if new functions have been detected
that still need to be converted.
This way the user can keep the daemon disabled and still control the
conversion of the mcount calls to nops on demand, simply by running
"echo 0 > /debugfs/tracing/ftraced_enabled"
whenever more conversions are needed.
To see the number of converted functions:
"cat /debugfs/tracing/dyn_ftrace_total_info"
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/ftrace.h |   6
 kernel/trace/ftrace.c  | 157
 2 files changed, 116 insertions(+), 47 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b482fe88bc04..623819433ed5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -72,9 +72,15 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
+
+void ftrace_disable_daemon(void);
+void ftrace_enable_daemon(void);
+
 #else
 # define ftrace_force_update()                  ({ 0; })
 # define ftrace_set_filter(buf, len, reset)     do { } while (0)
+# define ftrace_disable_daemon()                do { } while (0)
+# define ftrace_enable_daemon()                 do { } while (0)
 #endif
 
 /* totally disable ftrace - can not re-enable after this */
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1843edc098a6..f762f5a2d331 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -151,8 +151,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct task_struct *ftraced_task;
-static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
-static unsigned long ftraced_iteration_counter;
 
 enum {
         FTRACE_ENABLE_CALLS             = (1 << 0),
@@ -189,6 +187,7 @@ static struct ftrace_page *ftrace_pages;
 
 static int ftraced_trigger;
 static int ftraced_suspend;
+static int ftraced_stop;
 
 static int ftrace_record_suspend;
 
@@ -474,14 +473,21 @@ ftrace_code_disable(struct dyn_ftrace *rec)
         return 1;
 }
 
+static int __ftrace_update_code(void *ignore);
+
 static int __ftrace_modify_code(void *data)
 {
         unsigned long addr;
         int *command = data;
 
-        if (*command & FTRACE_ENABLE_CALLS)
+        if (*command & FTRACE_ENABLE_CALLS) {
+                /*
+                 * Update any recorded ips now that we have the
+                 * machine stopped
+                 */
+                __ftrace_update_code(NULL);
                 ftrace_replace_code(1);
-        else if (*command & FTRACE_DISABLE_CALLS)
+        } else if (*command & FTRACE_DISABLE_CALLS)
                 ftrace_replace_code(0);
 
         if (*command & FTRACE_UPDATE_TRACE_FUNC)
@@ -503,6 +509,25 @@ static void ftrace_run_update_code(int command)
         stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
 }
 
+void ftrace_disable_daemon(void)
+{
+        /* Stop the daemon from calling kstop_machine */
+        mutex_lock(&ftraced_lock);
+        ftraced_stop = 1;
+        mutex_unlock(&ftraced_lock);
+
+        ftrace_force_update();
+}
+
+void ftrace_enable_daemon(void)
+{
+        mutex_lock(&ftraced_lock);
+        ftraced_stop = 0;
+        mutex_unlock(&ftraced_lock);
+
+        ftrace_force_update();
+}
+
 static ftrace_func_t saved_ftrace_func;
 
 static void ftrace_startup(void)
@@ -603,6 +628,7 @@ static int __ftrace_update_code(void *ignore)
         int i;
 
         /* Don't be recording funcs now */
+        ftrace_record_suspend++;
         save_ftrace_enabled = ftrace_enabled;
         ftrace_enabled = 0;
 
@@ -628,18 +654,23 @@ static int __ftrace_update_code(void *ignore)
         stop = ftrace_now(raw_smp_processor_id());
         ftrace_update_time = stop - start;
         ftrace_update_tot_cnt += ftrace_update_cnt;
+        ftraced_trigger = 0;
 
         ftrace_enabled = save_ftrace_enabled;
+        ftrace_record_suspend--;
 
         return 0;
 }
 
-static void ftrace_update_code(void)
+static int ftrace_update_code(void)
 {
-        if (unlikely(ftrace_disabled))
-                return;
+        if (unlikely(ftrace_disabled) ||
+            !ftrace_enabled || !ftraced_trigger)
+                return 0;
 
         stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+
+        return 1;
 }
 
 static int ftraced(void *ignore)
@@ -658,14 +689,13 @@ static int ftraced(void *ignore)
 
                 mutex_lock(&ftrace_sysctl_lock);
                 mutex_lock(&ftraced_lock);
-                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
-                        ftrace_record_suspend++;
-                        ftrace_update_code();
+                if (!ftraced_suspend && !ftraced_stop &&
+                    ftrace_update_code()) {
                         usecs = nsecs_to_usecs(ftrace_update_time);
                         if (ftrace_update_tot_cnt > 100000) {
                                 ftrace_update_tot_cnt = 0;
                                 pr_info("hm, dftrace overflow: %lu change%s"
                                         " (%lu total) in %lu usec%s\n",
                                         ftrace_update_cnt,
                                         ftrace_update_cnt != 1 ? "s" : "",
                                         ftrace_update_tot_cnt,
@@ -673,15 +703,10 @@ static int ftraced(void *ignore)
                                 ftrace_disabled = 1;
                                 WARN_ON_ONCE(1);
                         }
-                        ftraced_trigger = 0;
-                        ftrace_record_suspend--;
                 }
-                ftraced_iteration_counter++;
                 mutex_unlock(&ftraced_lock);
                 mutex_unlock(&ftrace_sysctl_lock);
 
-                wake_up_interruptible(&ftraced_waiters);
-
                 ftrace_shutdown_replenish();
         }
         __set_current_state(TASK_RUNNING);
@@ -1219,6 +1244,55 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
         return ftrace_regex_release(inode, file, 0);
 }
 
+static ssize_t
+ftraced_read(struct file *filp, char __user *ubuf,
+             size_t cnt, loff_t *ppos)
+{
+        /* don't worry about races */
+        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
+        int r = strlen(buf);
+
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftraced_write(struct file *filp, const char __user *ubuf,
+              size_t cnt, loff_t *ppos)
+{
+        char buf[64];
+        long val;
+        int ret;
+
+        if (cnt >= sizeof(buf))
+                return -EINVAL;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        if (strncmp(buf, "enable", 6) == 0)
+                val = 1;
+        else if (strncmp(buf, "disable", 7) == 0)
+                val = 0;
+        else {
+                buf[cnt] = 0;
+
+                ret = strict_strtoul(buf, 10, &val);
+                if (ret < 0)
+                        return ret;
+
+                val = !!val;
+        }
+
+        if (val)
+                ftrace_enable_daemon();
+        else
+                ftrace_disable_daemon();
+
+        filp->f_pos += cnt;
+
+        return cnt;
+}
+
 static struct file_operations ftrace_avail_fops = {
         .open = ftrace_avail_open,
         .read = seq_read,
@@ -1242,51 +1316,34 @@ static struct file_operations ftrace_notrace_fops = {
         .release = ftrace_notrace_release,
 };
 
+static struct file_operations ftraced_fops = {
+        .open = tracing_open_generic,
+        .read = ftraced_read,
+        .write = ftraced_write,
+};
+
 /**
  * ftrace_force_update - force an update to all recording ftrace functions
- *
- * The ftrace dynamic update daemon only wakes up once a second.
- * There may be cases where an update needs to be done immediately
- * for tests or internal kernel tracing to begin. This function
- * wakes the daemon to do an update and will not return until the
- * update is complete.
  */
 int ftrace_force_update(void)
 {
-        unsigned long last_counter;
-        DECLARE_WAITQUEUE(wait, current);
         int ret = 0;
 
         if (unlikely(ftrace_disabled))
                 return -ENODEV;
 
+        mutex_lock(&ftrace_sysctl_lock);
         mutex_lock(&ftraced_lock);
-        last_counter = ftraced_iteration_counter;
-
-        set_current_state(TASK_INTERRUPTIBLE);
-        add_wait_queue(&ftraced_waiters, &wait);
 
-        if (unlikely(!ftraced_task)) {
-                ret = -ENODEV;
-                goto out;
-        }
-
-        do {
-                mutex_unlock(&ftraced_lock);
-                wake_up_process(ftraced_task);
-                schedule();
-                mutex_lock(&ftraced_lock);
-                if (signal_pending(current)) {
-                        ret = -EINTR;
-                        break;
-                }
-                set_current_state(TASK_INTERRUPTIBLE);
-        } while (last_counter == ftraced_iteration_counter);
+        /*
+         * If ftraced_trigger is not set, then there is nothing
+         * to update.
+         */
+        if (ftraced_trigger && !ftrace_update_code())
+                ret = -EBUSY;
 
- out:
         mutex_unlock(&ftraced_lock);
-        remove_wait_queue(&ftraced_waiters, &wait);
-        set_current_state(TASK_RUNNING);
+        mutex_unlock(&ftrace_sysctl_lock);
 
         return ret;
 }
@@ -1331,6 +1388,12 @@ static __init int ftrace_init_debugfs(void)
         if (!entry)
                 pr_warning("Could not create debugfs "
                            "'set_ftrace_notrace' entry\n");
+
+        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
+                                    NULL, &ftraced_fops);
+        if (!entry)
+                pr_warning("Could not create debugfs "
+                           "'ftraced_enabled' entry\n");
         return 0;
 }
 