author     Steven Rostedt <srostedt@redhat.com>    2008-11-26 00:16:23 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-11-26 00:52:52 -0500
commit     df4fc31558dd2a3a30292ddb3a64c2a5befcec73 (patch)
tree       e7e57093541568a039175aa846fe135f6ba575e2 /kernel/trace/ftrace.c
parent     c2324b694fa8ffee382a124198c68754088e483c (diff)
ftrace: add function tracing to single thread
Impact: feature to function trace a single thread
This patch adds the ability to function trace a single thread.
The file:
/debugfs/tracing/set_ftrace_pid
contains the pid to trace. Valid pids are any positive integer.
Writing any negative number to this file will disable pid tracing,
and the function tracer will go back to tracing all threads (see the
usage sketch below).
This feature works with both static and dynamic function tracing.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
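For context (not part of the commit itself), a minimal user-space sketch of
driving the new interface might look like the following. It assumes debugfs
is mounted so the file appears at /debugfs/tracing/set_ftrace_pid, as in the
path quoted above, and that the function tracer is then selected through the
existing current_tracer file; the write_str() helper is purely illustrative.

#include <stdio.h>

/* Illustrative helper: write a short string to a tracing control file. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(int argc, char **argv)
{
	/* pid to trace; "-1" (any negative number) disables pid tracing */
	const char *pid = argc > 1 ? argv[1] : "-1";

	/* limit function tracing to this pid (any positive integer) */
	write_str("/debugfs/tracing/set_ftrace_pid", pid);

	/* then enable the function tracer as usual */
	write_str("/debugfs/tracing/current_tracer", "function");

	return 0;
}

Writing a negative number back to set_ftrace_pid restores tracing of all
threads, as described in the changelog.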
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c | 209
1 file changed, 183 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7e2d3b91692d..00d98c65fad0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,9 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -61,6 +64,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +74,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +91,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -96,6 +116,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +149,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
@@ -182,8 +209,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +230,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		set_ftrace_pid_function(func);
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -545,7 +615,19 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
+
+static void ftrace_startup_enable(int command)
+{
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
 
 static void ftrace_startup(void)
 {
@@ -558,16 +640,8 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
-
-	if (!command || !ftrace_enabled)
-		goto out;
+	ftrace_startup_enable(command);
 
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
@@ -1262,13 +1336,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1295,8 +1366,6 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1451,100 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (ret < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *