author		Frederic Weisbecker <fweisbec@gmail.com>	2009-10-17 19:09:09 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-10-17 19:12:33 -0400
commit		0f8f86c7bdd1c954fbe153af437a0d91a6c5721a (patch)
tree		94a8d419a470a4f9852ca397bb9bbe48db92ff5c /kernel/trace
parent		dca2d6ac09d9ef59ff46820d4f0c94b08a671202 (diff)
parent		f39cdf25bf77219676ec5360980ac40b1a7e144a (diff)
Merge commit 'perf/core' into perf/hw-breakpoint
Conflicts:
kernel/Makefile
kernel/trace/Makefile
kernel/trace/trace.h
samples/Makefile
Merge reason: We need to be up to date with the perf events development
branch because we plan to rewrite the breakpoints API on top of
perf events.
Diffstat (limited to 'kernel/trace')
30 files changed, 2060 insertions, 1481 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5efeb4229ea0..06c3d5be6759 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -11,12 +11,18 @@ config NOP_TRACER
 
 config HAVE_FTRACE_NMI_ENTER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_GRAPH_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
@@ -28,21 +34,25 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
 	help
-	  This gets selected when the arch tests the function_trace_stop
-	  variable at the mcount call site. Otherwise, this variable
-	  is tested by the called function.
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_DYNAMIC_FTRACE
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FTRACE_MCOUNT_RECORD
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_HW_BRANCH_TRACER
 	bool
 
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config TRACER_MAX_TRACE
 	bool
@@ -73,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
 # This allows those options to appear when no other tracer is selected. But the
 # options do not appear when something else selects it. We need the two options
 # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options options.
+# hidding of the automatic options.
 
 config TRACING
 	bool
@@ -490,6 +500,18 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config EVENT_TRACE_TEST_SYSCALLS
+	bool "Run selftest on syscall events"
+	depends on FTRACE_STARTUP_TEST
+	help
+	 This option will also enable testing every syscall event.
+	 It only enables the event and disables it and runs various loads
+	 with the event enabled. This adds a bit more time for kernel boot
+	 up since it runs this on every system call defined.
+
+	 TBD - enable a way to actually call the syscalls as we test their
+	 events
+
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on HAVE_MMIOTRACE_SUPPORT && PCI
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index ce3b1cd02732..0f84c52e58fe 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
-obj-$(CONFIG_POWER_TRACER) += trace_power.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
@@ -55,5 +54,6 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
+obj-$(CONFIG_EVENT_TRACING) += power-traces.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 }
 
 /**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q:		queue the io is for
+ * @rq:		the source request
+ * @dev:	target device
+ * @from:	source sector
+ *
+ * Description:
+ *     Device mapper remaps request to other devices.
+ *     Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+				   struct request *rq, dev_t dev,
+				   sector_t from)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device_from = cpu_to_be32(dev);
+	r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+	r.sector_from = cpu_to_be64(from);
+
+	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+			sizeof(r), &r);
+}
+
+/**
  * blk_add_driver_data - Add binary message with driver-specific data
  * @q:		queue the io is for
  * @rq:		io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_remap(blk_add_trace_remap);
 	WARN_ON(ret);
+	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+	WARN_ON(ret);
 }
 
 static void blk_unregister_tracepoints(void)
 {
+	unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
 	unregister_trace_block_remap(blk_add_trace_remap);
 	unregister_trace_block_split(blk_add_trace_split);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
 	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
 }
 
+void blk_trace_remove_sysfs(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
 #ifdef CONFIG_EVENT_TRACING
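A note on the new probe: blk_add_trace_rq_remap() only fires once some caller
invokes the matching block_rq_remap tracepoint, and that call site lands on
the device-mapper/request path outside this diff. As a rough sketch (the
function and variable names below are hypothetical, inferred only from the
probe signature registered above), a request-based remap site would look
something like:

/* Hypothetical caller sketch, not part of this diff. */
#include <linux/blkdev.h>
#include <trace/events/block.h>

static void example_remap_request(struct request_queue *q,
				  struct request *rq,
				  struct block_device *target,
				  sector_t old_sector)
{
	/* Fires blk_add_trace_rq_remap() once the probe is registered */
	trace_block_rq_remap(q, rq, target->bd_dev, old_sector);
}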
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8c804e24f96f..b10c0d90a6ff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* List for set_ftrace_pid's pids. */
+LIST_HEAD(ftrace_pids);
+struct ftrace_pid {
+	struct list_head list;
+	struct pid *pid;
+};
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+#endif
+
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	else
 		func = ftrace_list_func;
 
-	if (ftrace_pid_trace) {
+	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	}
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_list->next == &ftrace_list_end) {
 		ftrace_func_t func = ftrace_list->func;
 
-		if (ftrace_pid_trace) {
+		if (!list_empty(&ftrace_pids)) {
 			set_ftrace_pid_function(func);
 			func = ftrace_pid_func;
 		}
@@ -225,9 +236,13 @@ static void ftrace_update_pid_func(void)
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	func = ftrace_trace_function;
+#else
+	func = __ftrace_trace_function;
+#endif
 
-	if (ftrace_pid_trace) {
+	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	} else {
@@ -817,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -1074,14 +1087,9 @@ static void ftrace_replace_code(int enable)
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
-			if ((system_state == SYSTEM_BOOTING) ||
-			    !core_kernel_text(rec->ip)) {
-				ftrace_free_rec(rec);
-			} else {
-				ftrace_bug(failed, rec->ip);
-				/* Stop processing */
-				return;
-			}
+			ftrace_bug(failed, rec->ip);
+			/* Stop processing */
+			return;
 		}
 	} while_for_each_ftrace_rec();
 }
@@ -1262,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
 		ftrace_new_addrs = p->newlist;
 		p->flags = 0L;
 
-		/* convert record (i.e, patch mcount-call with NOP) */
-		if (ftrace_code_disable(mod, p)) {
-			p->flags |= FTRACE_FL_CONVERTED;
-			ftrace_update_cnt++;
-		} else
-			ftrace_free_rec(p);
+		/*
+		 * Do the initial record convertion from mcount jump
+		 * to the NOP instructions.
+		 */
+		if (!ftrace_code_disable(mod, p)) {
+			ftrace_free_rec(p);
+			continue;
+		}
+
+		p->flags |= FTRACE_FL_CONVERTED;
+		ftrace_update_cnt++;
+
+		/*
+		 * If the tracing is enabled, go ahead and enable the record.
+		 *
+		 * The reason not to enable the record immediatelly is the
+		 * inherent check of ftrace_make_nop/ftrace_make_call for
+		 * correct previous instructions. Making first the NOP
+		 * conversion puts the module to the correct state, thus
+		 * passing the ftrace_make_call check.
+		 */
+		if (ftrace_start_up) {
+			int failed = __ftrace_replace_code(p, 1);
+			if (failed) {
+				ftrace_bug(failed, p->ip);
+				ftrace_free_rec(p);
+			}
+		}
 	}
 
 	stop = ftrace_now(raw_smp_processor_id());
@@ -1323,11 +1353,10 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 enum {
 	FTRACE_ITER_FILTER	= (1 << 0),
-	FTRACE_ITER_CONT	= (1 << 1),
-	FTRACE_ITER_NOTRACE	= (1 << 2),
-	FTRACE_ITER_FAILURES	= (1 << 3),
-	FTRACE_ITER_PRINTALL	= (1 << 4),
-	FTRACE_ITER_HASH	= (1 << 5),
+	FTRACE_ITER_NOTRACE	= (1 << 1),
+	FTRACE_ITER_FAILURES	= (1 << 2),
+	FTRACE_ITER_PRINTALL	= (1 << 3),
+	FTRACE_ITER_HASH	= (1 << 4),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
@@ -1337,8 +1366,7 @@ struct ftrace_iterator {
 	int			hidx;
 	int			idx;
 	unsigned		flags;
-	unsigned char		buffer[FTRACE_BUFF_MAX+1];
-	unsigned		buffer_idx;
+	struct trace_parser	parser;
 };
 
 static void *
@@ -1407,7 +1435,7 @@ static int t_hash_show(struct seq_file *m, void *v)
 	if (rec->ops->print)
 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
 
-	seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
+	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
 
 	if (rec->data)
 		seq_printf(m, ":%p", rec->data);
@@ -1517,12 +1545,12 @@ static int t_show(struct seq_file *m, void *v)
 	if (!rec)
 		return 0;
 
-	seq_printf(m, "%pf\n", (void *)rec->ip);
+	seq_printf(m, "%ps\n", (void *)rec->ip);
 
 	return 0;
 }
 
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -1604,6 +1632,11 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 	if (!iter)
 		return -ENOMEM;
 
+	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
+		kfree(iter);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&ftrace_regex_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
@@ -1618,8 +1651,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 		if (!ret) {
 			struct seq_file *m = file->private_data;
 			m->private = iter;
-		} else
+		} else {
+			trace_parser_put(&iter->parser);
 			kfree(iter);
+		}
 	} else
 		file->private_data = iter;
 	mutex_unlock(&ftrace_regex_lock);
@@ -1652,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
 	return ret;
 }
 
-enum {
-	MATCH_FULL,
-	MATCH_FRONT_ONLY,
-	MATCH_MIDDLE_ONLY,
-	MATCH_END_ONLY,
-};
-
-/*
- * (static function - no need for kernel doc)
- *
- * Pass in a buffer containing a glob and this function will
- * set search to point to the search part of the buffer and
- * return the type of search it is (see enum above).
- * This does modify buff.
- *
- * Returns enum type.
- *  search returns the pointer to use for comparison.
- *  not returns 1 if buff started with a '!'
- *   0 otherwise.
- */
-static int
-ftrace_setup_glob(char *buff, int len, char **search, int *not)
-{
-	int type = MATCH_FULL;
-	int i;
-
-	if (buff[0] == '!') {
-		*not = 1;
-		buff++;
-		len--;
-	} else
-		*not = 0;
-
-	*search = buff;
-
-	for (i = 0; i < len; i++) {
-		if (buff[i] == '*') {
-			if (!i) {
-				*search = buff + 1;
-				type = MATCH_END_ONLY;
-			} else {
-				if (type == MATCH_END_ONLY)
-					type = MATCH_MIDDLE_ONLY;
-				else
-					type = MATCH_FRONT_ONLY;
-				buff[i] = 0;
-				break;
-			}
-		}
-	}
-
-	return type;
-}
-
 static int ftrace_match(char *str, char *regex, int len, int type)
 {
 	int matched = 0;
@@ -1754,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 	int not;
 
 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-	type = ftrace_setup_glob(buff, len, &search, &not);
+	type = filter_parse_regex(buff, len, &search, &not);
 
 	search_len = strlen(search);
 
@@ -1822,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 	}
 
 	if (strlen(buff)) {
-		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+		type = filter_parse_regex(buff, strlen(buff), &search, &not);
 		search_len = strlen(search);
 	}
 
@@ -1987,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	int count = 0;
 	char *search;
 
-	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+	type = filter_parse_regex(glob, strlen(glob), &search, &not);
 	len = strlen(search);
 
 	/* we do not support '!' for function probes */
@@ -2059,12 +2040,12 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	int i, len = 0;
 	char *search;
 
-	if (glob && (strcmp(glob, "*") || !strlen(glob)))
+	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		glob = NULL;
-	else {
+	else if (glob) {
 		int not;
 
-		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+		type = filter_parse_regex(glob, strlen(glob), &search, &not);
 		len = strlen(search);
 
 		/* we do not support '!' for function probes */
@@ -2196,11 +2177,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos, int enable)
 {
 	struct ftrace_iterator *iter;
-	char ch;
-	size_t read = 0;
-	ssize_t ret;
+	struct trace_parser *parser;
+	ssize_t ret, read;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	mutex_lock(&ftrace_regex_lock);
@@ -2211,72 +2191,23 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	} else
 		iter = file->private_data;
 
-	if (!*ppos) {
-		iter->flags &= ~FTRACE_ITER_CONT;
-		iter->buffer_idx = 0;
-	}
-
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		goto out;
-	read++;
-	cnt--;
+	parser = &iter->parser;
+	read = trace_get_user(parser, ubuf, cnt, ppos);
 
-	/*
-	 * If the parser haven't finished with the last write,
-	 * continue reading the user input without skipping spaces.
-	 */
-	if (!(iter->flags & FTRACE_ITER_CONT)) {
-		/* skip white space */
-		while (cnt && isspace(ch)) {
-			ret = get_user(ch, ubuf++);
-			if (ret)
-				goto out;
-			read++;
-			cnt--;
-		}
-
-		/* only spaces were written */
-		if (isspace(ch)) {
-			*ppos += read;
-			ret = read;
-			goto out;
-		}
-
-		iter->buffer_idx = 0;
-	}
-
-	while (cnt && !isspace(ch)) {
-		if (iter->buffer_idx < FTRACE_BUFF_MAX)
-			iter->buffer[iter->buffer_idx++] = ch;
-		else {
-			ret = -EINVAL;
-			goto out;
-		}
-		ret = get_user(ch, ubuf++);
+	if (read >= 0 && trace_parser_loaded(parser) &&
+	    !trace_parser_cont(parser)) {
+		ret = ftrace_process_regex(parser->buffer,
+					   parser->idx, enable);
 		if (ret)
 			goto out;
-		read++;
-		cnt--;
-	}
 
-	if (isspace(ch)) {
-		iter->buffer[iter->buffer_idx] = 0;
-		ret = ftrace_process_regex(iter->buffer,
-					   iter->buffer_idx, enable);
-		if (ret)
-			goto out;
-		iter->buffer_idx = 0;
-	} else {
-		iter->flags |= FTRACE_ITER_CONT;
-		iter->buffer[iter->buffer_idx++] = ch;
-	}
+		trace_parser_clear(parser);
+	}
 
-	*ppos += read;
 	ret = read;
-out:
-	mutex_unlock(&ftrace_regex_lock);
 
+	mutex_unlock(&ftrace_regex_lock);
+out:
 	return ret;
 }
 
@@ -2343,6 +2274,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 
 static int __init set_ftrace_notrace(char *str)
 {
@@ -2358,6 +2290,31 @@ static int __init set_ftrace_filter(char *str)
 }
 __setup("ftrace_filter=", set_ftrace_filter);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int __init set_graph_function(char *str)
+{
+	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+	return 1;
+}
+__setup("ftrace_graph_filter=", set_graph_function);
+
+static void __init set_ftrace_early_graph(char *buf)
+{
+	int ret;
+	char *func;
+
+	while (buf) {
+		func = strsep(&buf, ",");
+		/* we allow only one expression at a time */
+		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+				      func);
+		if (ret)
+			printk(KERN_DEBUG "ftrace: function %s not "
+					  "traceable\n", func);
+	}
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static void __init set_ftrace_early_filter(char *buf, int enable)
 {
 	char *func;
@@ -2374,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
 	set_ftrace_early_filter(ftrace_filter_buf, 1);
 	if (ftrace_notrace_buf[0])
 		set_ftrace_early_filter(ftrace_notrace_buf, 0);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (ftrace_graph_buf[0])
+		set_ftrace_early_graph(ftrace_graph_buf);
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 }
 
 static int
@@ -2381,6 +2342,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
 	struct ftrace_iterator *iter;
+	struct trace_parser *parser;
 
 	mutex_lock(&ftrace_regex_lock);
 	if (file->f_mode & FMODE_READ) {
@@ -2390,9 +2352,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	} else
 		iter = file->private_data;
 
-	if (iter->buffer_idx) {
-		iter->buffer[iter->buffer_idx] = 0;
-		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
+	parser = &iter->parser;
+	if (trace_parser_loaded(parser)) {
+		parser->buffer[parser->idx] = 0;
+		ftrace_match_records(parser->buffer, parser->idx, enable);
 	}
 
 	mutex_lock(&ftrace_lock);
@@ -2400,7 +2363,9 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_lock);
 
+	trace_parser_put(parser);
 	kfree(iter);
+
 	mutex_unlock(&ftrace_regex_lock);
 	return 0;
 }
@@ -2457,11 +2422,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-	unsigned long *array = m->private;
-
 	if (*pos >= ftrace_graph_count)
 		return NULL;
-	return &array[*pos];
+	return &ftrace_graph_funcs[*pos];
 }
 
 static void *
@@ -2499,12 +2462,12 @@ static int g_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
-	seq_printf(m, "%pf\n", v);
+	seq_printf(m, "%ps\n", (void *)*ptr);
 
 	return 0;
 }
 
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
 	.start = g_start,
 	.next = g_next,
 	.stop = g_stop,
@@ -2525,16 +2488,10 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
+	mutex_unlock(&graph_lock);
 
-	if (file->f_mode & FMODE_READ) {
+	if (file->f_mode & FMODE_READ)
 		ret = seq_open(file, &ftrace_graph_seq_ops);
-		if (!ret) {
-			struct seq_file *m = file->private_data;
-			m->private = ftrace_graph_funcs;
-		}
-	} else
-		file->private_data = ftrace_graph_funcs;
-	mutex_unlock(&graph_lock);
 
 	return ret;
 }
@@ -2563,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 		return -ENODEV;
 
 	/* decode regex */
-	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
 	if (not)
 		return -EINVAL;
 
@@ -2602,12 +2559,8 @@ static ssize_t
 ftrace_graph_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	unsigned char buffer[FTRACE_BUFF_MAX+1];
-	unsigned long *array;
-	size_t read = 0;
-	ssize_t ret;
-	int index = 0;
-	char ch;
+	struct trace_parser parser;
+	ssize_t read, ret;
 
 	if (!cnt || cnt < 0)
 		return 0;
@@ -2616,60 +2569,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 
 	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
 		ret = -EBUSY;
-		goto out;
+		goto out_unlock;
 	}
 
-	if (file->f_mode & FMODE_READ) {
-		struct seq_file *m = file->private_data;
-		array = m->private;
-	} else
-		array = file->private_data;
-
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		goto out;
-	read++;
-	cnt--;
-
-	/* skip white space */
-	while (cnt && isspace(ch)) {
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			goto out;
-		read++;
-		cnt--;
+	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
+		ret = -ENOMEM;
+		goto out_unlock;
 	}
 
-	if (isspace(ch)) {
-		*ppos += read;
-		ret = read;
-		goto out;
-	}
+	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	while (cnt && !isspace(ch)) {
-		if (index < FTRACE_BUFF_MAX)
-			buffer[index++] = ch;
-		else {
-			ret = -EINVAL;
-			goto out;
-		}
-		ret = get_user(ch, ubuf++);
+	if (read >= 0 && trace_parser_loaded((&parser))) {
+		parser.buffer[parser.idx] = 0;
+
+		/* we allow only one expression at a time */
+		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+				      parser.buffer);
 		if (ret)
-			goto out;
-		read++;
-		cnt--;
+			goto out_free;
 	}
-	buffer[index] = 0;
-
-	/* we allow only one expression at a time */
-	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
-	if (ret)
-		goto out;
-
-	file->f_pos += read;
 
 	ret = read;
-out:
+
+out_free:
+	trace_parser_put(&parser);
+out_unlock:
 	mutex_unlock(&graph_lock);
 
 	return ret;
@@ -2707,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	return 0;
 }
 
-static int ftrace_convert_nops(struct module *mod,
+static int ftrace_process_locs(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
 {
@@ -2740,19 +2664,17 @@ static int ftrace_process_locs(struct module *mod,
 }
 
 #ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
-	unsigned long s = (unsigned long)start;
-	unsigned long e = (unsigned long)end;
 
-	if (ftrace_disabled || !start || start == end)
+	if (ftrace_disabled)
 		return;
 
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
-		if ((rec->ip >= s) && (rec->ip < e)) {
+		if (within_module_core(rec->ip, mod)) {
 			/*
 			 * rec->ip is changed in ftrace_free_rec()
 			 * It should not between s and e if record was freed.
@@ -2769,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
 {
 	if (ftrace_disabled || start == end)
 		return;
-	ftrace_convert_nops(mod, start, end);
+	ftrace_process_locs(mod, start, end);
 }
 
 static int ftrace_module_notify(struct notifier_block *self,
@@ -2784,9 +2706,7 @@ static int ftrace_module_notify(struct notifier_block *self,
 				 mod->num_ftrace_callsites);
 		break;
 	case MODULE_STATE_GOING:
-		ftrace_release(mod->ftrace_callsites,
-			       mod->ftrace_callsites +
-			       mod->num_ftrace_callsites);
+		ftrace_release_mod(mod);
 		break;
 	}
 
@@ -2832,7 +2752,7 @@ void __init ftrace_init(void)
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
-	ret = ftrace_convert_nops(NULL,
+	ret = ftrace_process_locs(NULL,
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
@@ -2865,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-static ssize_t
-ftrace_pid_read(struct file *file, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	if (ftrace_pid_trace == ftrace_swapper_pid)
-		r = sprintf(buf, "swapper tasks\n");
-	else if (ftrace_pid_trace)
-		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
-	else
-		r = sprintf(buf, "no pid\n");
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
@@ -2932,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
 	rcu_read_unlock();
 }
 
-static void clear_ftrace_pid_task(struct pid **pid)
+static void clear_ftrace_pid_task(struct pid *pid)
 {
-	if (*pid == ftrace_swapper_pid)
+	if (pid == ftrace_swapper_pid)
 		clear_ftrace_swapper();
 	else
-		clear_ftrace_pid(*pid);
-
-	*pid = NULL;
+		clear_ftrace_pid(pid);
 }
 
 static void set_ftrace_pid_task(struct pid *pid)
@@ -2950,11 +2851,140 @@ static void set_ftrace_pid_task(struct pid *pid)
 	set_ftrace_pid(pid);
 }
 
+static int ftrace_pid_add(int p)
+{
+	struct pid *pid;
+	struct ftrace_pid *fpid;
+	int ret = -EINVAL;
+
+	mutex_lock(&ftrace_lock);
+
+	if (!p)
+		pid = ftrace_swapper_pid;
+	else
+		pid = find_get_pid(p);
+
+	if (!pid)
+		goto out;
+
+	ret = 0;
+
+	list_for_each_entry(fpid, &ftrace_pids, list)
+		if (fpid->pid == pid)
+			goto out_put;
+
+	ret = -ENOMEM;
+
+	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
+	if (!fpid)
+		goto out_put;
+
+	list_add(&fpid->list, &ftrace_pids);
+	fpid->pid = pid;
+
+	set_ftrace_pid_task(pid);
+
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+	mutex_unlock(&ftrace_lock);
+	return 0;
+
+out_put:
+	if (pid != ftrace_swapper_pid)
+		put_pid(pid);
+
+out:
+	mutex_unlock(&ftrace_lock);
+	return ret;
+}
+
+static void ftrace_pid_reset(void)
+{
+	struct ftrace_pid *fpid, *safe;
+
+	mutex_lock(&ftrace_lock);
+	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
+		struct pid *pid = fpid->pid;
+
+		clear_ftrace_pid_task(pid);
+
+		list_del(&fpid->list);
+		kfree(fpid);
+	}
+
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+	mutex_unlock(&ftrace_lock);
+}
+
+static void *fpid_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&ftrace_lock);
+
+	if (list_empty(&ftrace_pids) && (!*pos))
+		return (void *) 1;
+
+	return seq_list_start(&ftrace_pids, *pos);
+}
+
+static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	if (v == (void *)1)
+		return NULL;
+
+	return seq_list_next(v, &ftrace_pids, pos);
+}
+
+static void fpid_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&ftrace_lock);
+}
+
+static int fpid_show(struct seq_file *m, void *v)
+{
+	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
+
+	if (v == (void *)1) {
+		seq_printf(m, "no pid\n");
+		return 0;
+	}
+
+	if (fpid->pid == ftrace_swapper_pid)
+		seq_printf(m, "swapper tasks\n");
+	else
+		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
+
+	return 0;
+}
+
+static const struct seq_operations ftrace_pid_sops = {
+	.start = fpid_start,
+	.next = fpid_next,
+	.stop = fpid_stop,
+	.show = fpid_show,
+};
+
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_pid_reset();
+
+	if (file->f_mode & FMODE_READ)
+		ret = seq_open(file, &ftrace_pid_sops);
+
+	return ret;
+}
+
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	struct pid *pid;
 	char buf[64];
 	long val;
 	int ret;
@@ -2967,57 +2997,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
+	/*
+	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
+	 * to clean the filter quietly.
+	 */
+	strstrip(buf);
+	if (strlen(buf) == 0)
+		return 1;
+
 	ret = strict_strtol(buf, 10, &val);
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&ftrace_lock);
-	if (val < 0) {
-		/* disable pid tracing */
-		if (!ftrace_pid_trace)
-			goto out;
-
-		clear_ftrace_pid_task(&ftrace_pid_trace);
-
-	} else {
-		/* swapper task is special */
-		if (!val) {
-			pid = ftrace_swapper_pid;
-			if (pid == ftrace_pid_trace)
-				goto out;
-		} else {
-			pid = find_get_pid(val);
-
-			if (pid == ftrace_pid_trace) {
-				put_pid(pid);
-				goto out;
-			}
-		}
-
-		if (ftrace_pid_trace)
-			clear_ftrace_pid_task(&ftrace_pid_trace);
-
-		if (!pid)
-			goto out;
+	ret = ftrace_pid_add(val);
 
-		ftrace_pid_trace = pid;
+	return ret ? ret : cnt;
+}
 
-		set_ftrace_pid_task(ftrace_pid_trace);
-	}
-
-	/* update the function call */
-	ftrace_update_pid_func();
-	ftrace_startup_enable(0);
+static int
+ftrace_pid_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
 
-out:
-	mutex_unlock(&ftrace_lock);
-
-	return cnt;
+	return 0;
 }
 
 static const struct file_operations ftrace_pid_fops = {
-	.read = ftrace_pid_read,
-	.write = ftrace_pid_write,
+	.open		= ftrace_pid_open,
+	.write		= ftrace_pid_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= ftrace_pid_release,
 };
 
 static __init int ftrace_init_debugfs(void)
@@ -3100,7 +3111,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-		     struct file *file, void __user *buffer, size_t *lenp,
+		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos)
 {
 	int ret;
@@ -3110,7 +3121,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	mutex_lock(&ftrace_lock);
 
-	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
 		goto out;
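The net effect of the ftrace_pids rework above: set_ftrace_pid now takes a
list of PIDs (each write appends one via ftrace_pid_add()), and opening the
file with O_TRUNC runs ftrace_pid_reset() to empty the list. A minimal
user-space sketch of the new semantics, assuming debugfs at
/sys/kernel/debug and with the PID values purely illustrative (note that a
shell "echo pid > file" opens with O_TRUNC and so clears the list first;
use ">>" to append):

/* User-space sketch: trace two tasks, then clear the filter. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/set_ftrace_pid";
	int fd;

	/* each write adds one pid to the ftrace_pids list */
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "1234\n", 5);
	write(fd, "5678\n", 5);
	close(fd);

	/* O_TRUNC triggers ftrace_pid_reset(), emptying the list */
	fd = open(path, O_WRONLY | O_TRUNC);
	if (fd >= 0)
		close(fd);
	return 0;
}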
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
 		return 1;
 	}
 
-	if (!register_tracer(&kmem_tracer)) {
+	if (register_tracer(&kmem_tracer) != 0) {
 		pr_warning("Warning: could not register the kmem tracer\n");
 		return 1;
 	}
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
new file mode 100644
index 000000000000..e06c6e3d56a3
--- /dev/null
+++ b/kernel/trace/power-traces.c
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Arjan van de Ven <arjan@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
+
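The CREATE_TRACE_POINTS idiom used in this new file is worth spelling out:
exactly one .c file defines the macro before including the trace header,
which expands the TRACE_EVENT() declarations there into real tracepoint
instances; every other user includes the same header without the macro and
only sees the inline trace_*() hooks. A schematic sketch, with the subsystem
header and event name hypothetical:

/* my-traces.c - the one and only definition site */
#define CREATE_TRACE_POINTS
#include <trace/events/my_subsys.h>	/* hypothetical header */

/* elsewhere.c - ordinary users just include the header */
#include <trace/events/my_subsys.h>

void do_work(void)
{
	trace_my_event(42);	/* near no-op unless a probe is attached */
}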
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 454e74e718cf..e43c928356ee 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-#include "trace.h"
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -399,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
@@ -701,8 +702,8 @@ static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 
 	val &= ~RB_FLAG_MASK;
 
-	ret = (unsigned long)cmpxchg(&list->next,
-				     val | old_flag, val | new_flag);
+	ret = cmpxchg((unsigned long *)&list->next,
+		      val | old_flag, val | new_flag);
 
 	/* check if the reader took the page */
 	if ((ret & ~RB_FLAG_MASK) != val)
@@ -794,7 +795,7 @@ static int rb_head_page_replace(struct buffer_page *old,
 	val = *ptr & ~RB_FLAG_MASK;
 	val |= RB_PAGE_HEAD;
 
-	ret = cmpxchg(ptr, val, &new->list);
+	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 
 	return ret == val;
 }
@@ -2997,15 +2998,12 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 }
 
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	cpu_buffer = buffer->buffers[cpu];
-
 again:
 	/*
 	 * We repeat when a timestamp is encountered. It is possible
@@ -3049,7 +3047,7 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 	case RINGBUF_TYPE_DATA:
 		if (ts) {
 			*ts = cpu_buffer->read_stamp + event->time_delta;
-			ring_buffer_normalize_time_stamp(buffer,
+			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
 							 cpu_buffer->cpu, ts);
 		}
 		return event;
@@ -3168,7 +3166,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	local_irq_save(flags);
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
-	event = rb_buffer_peek(buffer, cpu, ts);
+	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
@@ -3237,7 +3235,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 
-	event = rb_buffer_peek(buffer, cpu, ts);
+	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event)
 		rb_advance_reader(cpu_buffer);
 
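The casts added around cmpxchg() in the rb_head_page_* hunks make the ring
buffer's flag-in-pointer trick explicit: buffer pages are word-aligned, so
the low bits of list->next are free to carry reader/head state, and a
cmpxchg() on the raw unsigned long swaps the flag bits while atomically
verifying the pointer part. A standalone sketch of the idea, with names and
the mask value hypothetical:

#include <linux/list.h>

#define EXAMPLE_FLAG_MASK	3UL	/* low bits of an aligned pointer */

static int example_set_ptr_flag(struct list_head *list,
				unsigned long old_flag,
				unsigned long new_flag)
{
	unsigned long val = (unsigned long)list->next & ~EXAMPLE_FLAG_MASK;
	unsigned long ret;

	/* atomically replace the flag while checking the pointer part */
	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* nonzero iff the pointer was unchanged and the swap succeeded */
	return (ret & ~EXAMPLE_FLAG_MASK) == val;
}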
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c75deeefe30..026e715a0c7a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,19 +125,19 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -242,13 +242,6 @@ static struct tracer	*trace_types __read_mostly;
 static struct tracer	*current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
@@ -339,6 +338,112 @@ static struct {
 
 int trace_clock_id;
 
+/*
+ * trace_parser_get_init - gets the buffer for trace parser
+ */
+int trace_parser_get_init(struct trace_parser *parser, int size)
+{
+	memset(parser, 0, sizeof(*parser));
+
+	parser->buffer = kmalloc(size, GFP_KERNEL);
+	if (!parser->buffer)
+		return 1;
+
+	parser->size = size;
+	return 0;
+}
+
+/*
+ * trace_parser_put - frees the buffer for trace parser
358 | */ | ||
359 | void trace_parser_put(struct trace_parser *parser) | ||
360 | { | ||
361 | kfree(parser->buffer); | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * trace_get_user - reads the user input string separated by space | ||
366 | * (matched by isspace(ch)) | ||
367 | * | ||
368 | * For each string found the 'struct trace_parser' is updated, | ||
369 | * and the function returns. | ||
370 | * | ||
371 | * Returns number of bytes read. | ||
372 | * | ||
373 | * See kernel/trace/trace.h for 'struct trace_parser' details. | ||
374 | */ | ||
375 | int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | ||
376 | size_t cnt, loff_t *ppos) | ||
377 | { | ||
378 | char ch; | ||
379 | size_t read = 0; | ||
380 | ssize_t ret; | ||
381 | |||
382 | if (!*ppos) | ||
383 | trace_parser_clear(parser); | ||
384 | |||
385 | ret = get_user(ch, ubuf++); | ||
386 | if (ret) | ||
387 | goto out; | ||
388 | |||
389 | read++; | ||
390 | cnt--; | ||
391 | |||
392 | /* | ||
393 | * The parser is not finished with the last write, | ||
394 | * continue reading the user input without skipping spaces. | ||
395 | */ | ||
396 | if (!parser->cont) { | ||
397 | /* skip white space */ | ||
398 | while (cnt && isspace(ch)) { | ||
399 | ret = get_user(ch, ubuf++); | ||
400 | if (ret) | ||
401 | goto out; | ||
402 | read++; | ||
403 | cnt--; | ||
404 | } | ||
405 | |||
406 | /* only spaces were written */ | ||
407 | if (isspace(ch)) { | ||
408 | *ppos += read; | ||
409 | ret = read; | ||
410 | goto out; | ||
411 | } | ||
412 | |||
413 | parser->idx = 0; | ||
414 | } | ||
415 | |||
416 | /* read the non-space input */ | ||
417 | while (cnt && !isspace(ch)) { | ||
418 | if (parser->idx < parser->size - 1) | ||
419 | parser->buffer[parser->idx++] = ch; | ||
420 | else { | ||
421 | ret = -EINVAL; | ||
422 | goto out; | ||
423 | } | ||
424 | ret = get_user(ch, ubuf++); | ||
425 | if (ret) | ||
426 | goto out; | ||
427 | read++; | ||
428 | cnt--; | ||
429 | } | ||
430 | |||
431 | /* We either got finished input or we have to wait for another call. */ | ||
432 | if (isspace(ch)) { | ||
433 | parser->buffer[parser->idx] = 0; | ||
434 | parser->cont = false; | ||
435 | } else { | ||
436 | parser->cont = true; | ||
437 | parser->buffer[parser->idx++] = ch; | ||
438 | } | ||
439 | |||
440 | *ppos += read; | ||
441 | ret = read; | ||
442 | |||
443 | out: | ||
444 | return ret; | ||
445 | } | ||
446 | |||
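
Taken together, the parser helpers above give write handlers a small state machine for space-separated tokens that may arrive split across several write() calls. A minimal consumer sketch, assuming a hypothetical debugfs-style handler (none of the names below come from this patch):

    static ssize_t demo_write(struct file *filp, const char __user *ubuf,
                              size_t cnt, loff_t *ppos)
    {
            struct trace_parser parser;
            ssize_t read;

            if (trace_parser_get_init(&parser, 128))
                    return -ENOMEM;

            read = trace_get_user(&parser, ubuf, cnt, ppos);
            if (read >= 0 && trace_parser_loaded(&parser) &&
                !trace_parser_cont(&parser)) {
                    /* parser.buffer now holds one NUL-terminated token */
                    pr_info("token: %s\n", parser.buffer);
            }

            trace_parser_put(&parser);
            return read;
    }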
342 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | 447 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) |
343 | { | 448 | { |
344 | int len; | 449 | int len; |
@@ -513,7 +618,6 @@ __releases(kernel_lock) | |||
513 | __acquires(kernel_lock) | 618 | __acquires(kernel_lock) |
514 | { | 619 | { |
515 | struct tracer *t; | 620 | struct tracer *t; |
516 | int len; | ||
517 | int ret = 0; | 621 | int ret = 0; |
518 | 622 | ||
519 | if (!type->name) { | 623 | if (!type->name) { |
@@ -521,6 +625,11 @@ __acquires(kernel_lock) | |||
521 | return -1; | 625 | return -1; |
522 | } | 626 | } |
523 | 627 | ||
628 | if (strlen(type->name) > MAX_TRACER_SIZE) { | ||
629 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | ||
630 | return -1; | ||
631 | } | ||
632 | |||
524 | /* | 633 | /* |
525 | * When this gets called we hold the BKL which means that | 634 | * When this gets called we hold the BKL which means that |
526 | * preemption is disabled. Various trace selftests however | 635 | * preemption is disabled. Various trace selftests however |
@@ -535,7 +644,7 @@ __acquires(kernel_lock) | |||
535 | for (t = trace_types; t; t = t->next) { | 644 | for (t = trace_types; t; t = t->next) { |
536 | if (strcmp(type->name, t->name) == 0) { | 645 | if (strcmp(type->name, t->name) == 0) { |
537 | /* already found */ | 646 | /* already found */ |
538 | pr_info("Trace %s already registered\n", | 647 | pr_info("Tracer %s already registered\n", |
539 | type->name); | 648 | type->name); |
540 | ret = -1; | 649 | ret = -1; |
541 | goto out; | 650 | goto out; |
@@ -586,9 +695,6 @@ __acquires(kernel_lock) | |||
586 | 695 | ||
587 | type->next = trace_types; | 696 | type->next = trace_types; |
588 | trace_types = type; | 697 | trace_types = type; |
589 | len = strlen(type->name); | ||
590 | if (len > max_tracer_type_len) | ||
591 | max_tracer_type_len = len; | ||
592 | 698 | ||
593 | out: | 699 | out: |
594 | tracing_selftest_running = false; | 700 | tracing_selftest_running = false; |
@@ -597,7 +703,7 @@ __acquires(kernel_lock) | |||
597 | if (ret || !default_bootup_tracer) | 703 | if (ret || !default_bootup_tracer) |
598 | goto out_unlock; | 704 | goto out_unlock; |
599 | 705 | ||
600 | if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE)) | 706 | if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) |
601 | goto out_unlock; | 707 | goto out_unlock; |
602 | 708 | ||
603 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 709 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); |
@@ -619,14 +725,13 @@ __acquires(kernel_lock) | |||
619 | void unregister_tracer(struct tracer *type) | 725 | void unregister_tracer(struct tracer *type) |
620 | { | 726 | { |
621 | struct tracer **t; | 727 | struct tracer **t; |
622 | int len; | ||
623 | 728 | ||
624 | mutex_lock(&trace_types_lock); | 729 | mutex_lock(&trace_types_lock); |
625 | for (t = &trace_types; *t; t = &(*t)->next) { | 730 | for (t = &trace_types; *t; t = &(*t)->next) { |
626 | if (*t == type) | 731 | if (*t == type) |
627 | goto found; | 732 | goto found; |
628 | } | 733 | } |
629 | pr_info("Trace %s not registered\n", type->name); | 734 | pr_info("Tracer %s not registered\n", type->name); |
630 | goto out; | 735 | goto out; |
631 | 736 | ||
632 | found: | 737 | found: |
@@ -639,17 +744,7 @@ void unregister_tracer(struct tracer *type) | |||
639 | current_trace->stop(&global_trace); | 744 | current_trace->stop(&global_trace); |
640 | current_trace = &nop_trace; | 745 | current_trace = &nop_trace; |
641 | } | 746 | } |
642 | 747 | out: | |
643 | if (strlen(type->name) != max_tracer_type_len) | ||
644 | goto out; | ||
645 | |||
646 | max_tracer_type_len = 0; | ||
647 | for (t = &trace_types; *t; t = &(*t)->next) { | ||
648 | len = strlen((*t)->name); | ||
649 | if (len > max_tracer_type_len) | ||
650 | max_tracer_type_len = len; | ||
651 | } | ||
652 | out: | ||
653 | mutex_unlock(&trace_types_lock); | 748 | mutex_unlock(&trace_types_lock); |
654 | } | 749 | } |
655 | 750 | ||
@@ -719,6 +814,11 @@ static void trace_init_cmdlines(void) | |||
719 | cmdline_idx = 0; | 814 | cmdline_idx = 0; |
720 | } | 815 | } |
721 | 816 | ||
817 | int is_tracing_stopped(void) | ||
818 | { | ||
819 | return trace_stop_count; | ||
820 | } | ||
821 | |||
722 | /** | 822 | /** |
723 | * ftrace_off_permanent - disable all ftrace code permanently | 823 | * ftrace_off_permanent - disable all ftrace code permanently |
724 | * | 824 | * |
@@ -886,7 +986,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
886 | 986 | ||
887 | entry->preempt_count = pc & 0xff; | 987 | entry->preempt_count = pc & 0xff; |
888 | entry->pid = (tsk) ? tsk->pid : 0; | 988 | entry->pid = (tsk) ? tsk->pid : 0; |
889 | entry->tgid = (tsk) ? tsk->tgid : 0; | 989 | entry->lock_depth = (tsk) ? tsk->lock_depth : 0; |
890 | entry->flags = | 990 | entry->flags = |
891 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 991 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
892 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 992 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
@@ -1068,6 +1168,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1068 | return; | 1168 | return; |
1069 | entry = ring_buffer_event_data(event); | 1169 | entry = ring_buffer_event_data(event); |
1070 | 1170 | ||
1171 | entry->tgid = current->tgid; | ||
1071 | memset(&entry->caller, 0, sizeof(entry->caller)); | 1172 | memset(&entry->caller, 0, sizeof(entry->caller)); |
1072 | 1173 | ||
1073 | trace.nr_entries = 0; | 1174 | trace.nr_entries = 0; |
@@ -1094,6 +1195,7 @@ ftrace_trace_special(void *__tr, | |||
1094 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 1195 | unsigned long arg1, unsigned long arg2, unsigned long arg3, |
1095 | int pc) | 1196 | int pc) |
1096 | { | 1197 | { |
1198 | struct ftrace_event_call *call = &event_special; | ||
1097 | struct ring_buffer_event *event; | 1199 | struct ring_buffer_event *event; |
1098 | struct trace_array *tr = __tr; | 1200 | struct trace_array *tr = __tr; |
1099 | struct ring_buffer *buffer = tr->buffer; | 1201 | struct ring_buffer *buffer = tr->buffer; |
@@ -1107,7 +1209,9 @@ ftrace_trace_special(void *__tr, | |||
1107 | entry->arg1 = arg1; | 1209 | entry->arg1 = arg1; |
1108 | entry->arg2 = arg2; | 1210 | entry->arg2 = arg2; |
1109 | entry->arg3 = arg3; | 1211 | entry->arg3 = arg3; |
1110 | trace_buffer_unlock_commit(buffer, event, 0, pc); | 1212 | |
1213 | if (!filter_check_discard(call, entry, buffer, event)) | ||
1214 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
1111 | } | 1215 | } |
1112 | 1216 | ||
1113 | void | 1217 | void |
@@ -1289,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1289 | 1393 | ||
1290 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 1394 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
1291 | { | 1395 | { |
1292 | return trace_array_printk(&global_trace, ip, fmt, args); | 1396 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
1293 | } | 1397 | } |
1294 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1398 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1295 | 1399 | ||
@@ -1530,10 +1634,10 @@ static void print_lat_help_header(struct seq_file *m) | |||
1530 | seq_puts(m, "# | / _----=> need-resched \n"); | 1634 | seq_puts(m, "# | / _----=> need-resched \n"); |
1531 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); | 1635 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); |
1532 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); | 1636 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); |
1533 | seq_puts(m, "# |||| / \n"); | 1637 | seq_puts(m, "# |||| /_--=> lock-depth \n"); |
1534 | seq_puts(m, "# ||||| delay \n"); | 1638 | seq_puts(m, "# |||||/ delay \n"); |
1535 | seq_puts(m, "# cmd pid ||||| time | caller \n"); | 1639 | seq_puts(m, "# cmd pid |||||| time | caller \n"); |
1536 | seq_puts(m, "# \\ / ||||| \\ | / \n"); | 1640 | seq_puts(m, "# \\ / |||||| \\ | / \n"); |
1537 | } | 1641 | } |
1538 | 1642 | ||
1539 | static void print_func_help_header(struct seq_file *m) | 1643 | static void print_func_help_header(struct seq_file *m) |
@@ -1845,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v) | |||
1845 | return 0; | 1949 | return 0; |
1846 | } | 1950 | } |
1847 | 1951 | ||
1848 | static struct seq_operations tracer_seq_ops = { | 1952 | static const struct seq_operations tracer_seq_ops = { |
1849 | .start = s_start, | 1953 | .start = s_start, |
1850 | .next = s_next, | 1954 | .next = s_next, |
1851 | .stop = s_stop, | 1955 | .stop = s_stop, |
@@ -1880,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1880 | if (current_trace) | 1984 | if (current_trace) |
1881 | *iter->trace = *current_trace; | 1985 | *iter->trace = *current_trace; |
1882 | 1986 | ||
1883 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) | 1987 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
1884 | goto fail; | 1988 | goto fail; |
1885 | 1989 | ||
1886 | cpumask_clear(iter->started); | ||
1887 | |||
1888 | if (current_trace && current_trace->print_max) | 1990 | if (current_trace && current_trace->print_max) |
1889 | iter->tr = &max_tr; | 1991 | iter->tr = &max_tr; |
1890 | else | 1992 | else |
@@ -2059,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v) | |||
2059 | return 0; | 2161 | return 0; |
2060 | } | 2162 | } |
2061 | 2163 | ||
2062 | static struct seq_operations show_traces_seq_ops = { | 2164 | static const struct seq_operations show_traces_seq_ops = { |
2063 | .start = t_start, | 2165 | .start = t_start, |
2064 | .next = t_next, | 2166 | .next = t_next, |
2065 | .stop = t_stop, | 2167 | .stop = t_stop, |
@@ -2489,7 +2591,7 @@ static ssize_t | |||
2489 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 2591 | tracing_set_trace_read(struct file *filp, char __user *ubuf, |
2490 | size_t cnt, loff_t *ppos) | 2592 | size_t cnt, loff_t *ppos) |
2491 | { | 2593 | { |
2492 | char buf[max_tracer_type_len+2]; | 2594 | char buf[MAX_TRACER_SIZE+2]; |
2493 | int r; | 2595 | int r; |
2494 | 2596 | ||
2495 | mutex_lock(&trace_types_lock); | 2597 | mutex_lock(&trace_types_lock); |
@@ -2639,15 +2741,15 @@ static ssize_t | |||
2639 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 2741 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
2640 | size_t cnt, loff_t *ppos) | 2742 | size_t cnt, loff_t *ppos) |
2641 | { | 2743 | { |
2642 | char buf[max_tracer_type_len+1]; | 2744 | char buf[MAX_TRACER_SIZE+1]; |
2643 | int i; | 2745 | int i; |
2644 | size_t ret; | 2746 | size_t ret; |
2645 | int err; | 2747 | int err; |
2646 | 2748 | ||
2647 | ret = cnt; | 2749 | ret = cnt; |
2648 | 2750 | ||
2649 | if (cnt > max_tracer_type_len) | 2751 | if (cnt > MAX_TRACER_SIZE) |
2650 | cnt = max_tracer_type_len; | 2752 | cnt = MAX_TRACER_SIZE; |
2651 | 2753 | ||
2652 | if (copy_from_user(&buf, ubuf, cnt)) | 2754 | if (copy_from_user(&buf, ubuf, cnt)) |
2653 | return -EFAULT; | 2755 | return -EFAULT; |
@@ -4285,7 +4387,7 @@ __init static int tracer_alloc_buffers(void) | |||
4285 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4387 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
4286 | goto out_free_buffer_mask; | 4388 | goto out_free_buffer_mask; |
4287 | 4389 | ||
4288 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | 4390 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) |
4289 | goto out_free_tracing_cpumask; | 4391 | goto out_free_tracing_cpumask; |
4290 | 4392 | ||
4291 | /* To save memory, keep the ring buffer size to its minimum */ | 4393 | /* To save memory, keep the ring buffer size to its minimum */ |
@@ -4296,7 +4398,6 @@ __init static int tracer_alloc_buffers(void) | |||
4296 | 4398 | ||
4297 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 4399 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
4298 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 4400 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
4299 | cpumask_clear(tracing_reader_cpumask); | ||
4300 | 4401 | ||
4301 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 4402 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
4302 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 4403 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, |
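
Both cpumask changes in this file follow the same shape: zalloc_cpumask_var() returns the mask already zeroed, folding the old allocate-then-cpumask_clear() pair into one call, which is why the explicit clear disappears from the hunks. The idiom being adopted, in a throwaway helper (the function name is invented):

    static int demo_alloc_mask(cpumask_var_t *mask)
    {
            /* one call replaces alloc_cpumask_var() + cpumask_clear() */
            if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                    return -ENOMEM;
            return 0;
    }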
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ea7e0bcbd539..91c3d0e9a5a1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -7,10 +7,10 @@ | |||
7 | #include <linux/clocksource.h> | 7 | #include <linux/clocksource.h> |
8 | #include <linux/ring_buffer.h> | 8 | #include <linux/ring_buffer.h> |
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/tracepoint.h> | ||
10 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | 12 | #include <trace/boot.h> |
12 | #include <linux/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
13 | #include <trace/power.h> | ||
14 | 14 | ||
15 | #include <linux/trace_seq.h> | 15 | #include <linux/trace_seq.h> |
16 | #include <linux/ftrace_event.h> | 16 | #include <linux/ftrace_event.h> |
@@ -40,164 +40,60 @@ enum trace_type { | |||
40 | TRACE_HW_BRANCHES, | 40 | TRACE_HW_BRANCHES, |
41 | TRACE_KMEM_ALLOC, | 41 | TRACE_KMEM_ALLOC, |
42 | TRACE_KMEM_FREE, | 42 | TRACE_KMEM_FREE, |
43 | TRACE_POWER, | ||
44 | TRACE_BLK, | 43 | TRACE_BLK, |
45 | TRACE_KSYM, | 44 | TRACE_KSYM, |
46 | 45 | ||
47 | __TRACE_LAST_TYPE, | 46 | __TRACE_LAST_TYPE, |
48 | }; | 47 | }; |
49 | 48 | ||
50 | /* | 49 | enum kmemtrace_type_id { |
51 | * Function trace entry - function address and parent function address: | 50 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ |
52 | */ | 51 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ |
53 | struct ftrace_entry { | 52 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ |
54 | struct trace_entry ent; | ||
55 | unsigned long ip; | ||
56 | unsigned long parent_ip; | ||
57 | }; | ||
58 | |||
59 | /* Function call entry */ | ||
60 | struct ftrace_graph_ent_entry { | ||
61 | struct trace_entry ent; | ||
62 | struct ftrace_graph_ent graph_ent; | ||
63 | }; | 53 | }; |
64 | 54 | ||
65 | /* Function return entry */ | ||
66 | struct ftrace_graph_ret_entry { | ||
67 | struct trace_entry ent; | ||
68 | struct ftrace_graph_ret ret; | ||
69 | }; | ||
70 | extern struct tracer boot_tracer; | 55 | extern struct tracer boot_tracer; |
71 | 56 | ||
72 | /* | 57 | #undef __field |
73 | * Context switch trace entry - which task (and prio) we switched from/to: | 58 | #define __field(type, item) type item; |
74 | */ | ||
75 | struct ctx_switch_entry { | ||
76 | struct trace_entry ent; | ||
77 | unsigned int prev_pid; | ||
78 | unsigned char prev_prio; | ||
79 | unsigned char prev_state; | ||
80 | unsigned int next_pid; | ||
81 | unsigned char next_prio; | ||
82 | unsigned char next_state; | ||
83 | unsigned int next_cpu; | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * Special (free-form) trace entry: | ||
88 | */ | ||
89 | struct special_entry { | ||
90 | struct trace_entry ent; | ||
91 | unsigned long arg1; | ||
92 | unsigned long arg2; | ||
93 | unsigned long arg3; | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * Stack-trace entry: | ||
98 | */ | ||
99 | |||
100 | #define FTRACE_STACK_ENTRIES 8 | ||
101 | |||
102 | struct stack_entry { | ||
103 | struct trace_entry ent; | ||
104 | unsigned long caller[FTRACE_STACK_ENTRIES]; | ||
105 | }; | ||
106 | |||
107 | struct userstack_entry { | ||
108 | struct trace_entry ent; | ||
109 | unsigned long caller[FTRACE_STACK_ENTRIES]; | ||
110 | }; | ||
111 | |||
112 | /* | ||
113 | * trace_printk entry: | ||
114 | */ | ||
115 | struct bprint_entry { | ||
116 | struct trace_entry ent; | ||
117 | unsigned long ip; | ||
118 | const char *fmt; | ||
119 | u32 buf[]; | ||
120 | }; | ||
121 | |||
122 | struct print_entry { | ||
123 | struct trace_entry ent; | ||
124 | unsigned long ip; | ||
125 | char buf[]; | ||
126 | }; | ||
127 | |||
128 | #define TRACE_OLD_SIZE 88 | ||
129 | |||
130 | struct trace_field_cont { | ||
131 | unsigned char type; | ||
132 | /* Temporary till we get rid of this completely */ | ||
133 | char buf[TRACE_OLD_SIZE - 1]; | ||
134 | }; | ||
135 | 59 | ||
136 | struct trace_mmiotrace_rw { | 60 | #undef __field_struct |
137 | struct trace_entry ent; | 61 | #define __field_struct(type, item) __field(type, item) |
138 | struct mmiotrace_rw rw; | ||
139 | }; | ||
140 | 62 | ||
141 | struct trace_mmiotrace_map { | 63 | #undef __field_desc |
142 | struct trace_entry ent; | 64 | #define __field_desc(type, container, item) |
143 | struct mmiotrace_map map; | ||
144 | }; | ||
145 | 65 | ||
146 | struct trace_boot_call { | 66 | #undef __array |
147 | struct trace_entry ent; | 67 | #define __array(type, item, size) type item[size]; |
148 | struct boot_trace_call boot_call; | ||
149 | }; | ||
150 | 68 | ||
151 | struct trace_boot_ret { | 69 | #undef __array_desc |
152 | struct trace_entry ent; | 70 | #define __array_desc(type, container, item, size) |
153 | struct boot_trace_ret boot_ret; | ||
154 | }; | ||
155 | 71 | ||
156 | #define TRACE_FUNC_SIZE 30 | 72 | #undef __dynamic_array |
157 | #define TRACE_FILE_SIZE 20 | 73 | #define __dynamic_array(type, item) type item[]; |
158 | struct trace_branch { | ||
159 | struct trace_entry ent; | ||
160 | unsigned line; | ||
161 | char func[TRACE_FUNC_SIZE+1]; | ||
162 | char file[TRACE_FILE_SIZE+1]; | ||
163 | char correct; | ||
164 | }; | ||
165 | 74 | ||
166 | struct hw_branch_entry { | 75 | #undef F_STRUCT |
167 | struct trace_entry ent; | 76 | #define F_STRUCT(args...) args |
168 | u64 from; | ||
169 | u64 to; | ||
170 | }; | ||
171 | 77 | ||
172 | struct trace_power { | 78 | #undef FTRACE_ENTRY |
173 | struct trace_entry ent; | 79 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
174 | struct power_trace state_data; | 80 | struct struct_name { \ |
175 | }; | 81 | struct trace_entry ent; \ |
82 | tstruct \ | ||
83 | } | ||
176 | 84 | ||
177 | enum kmemtrace_type_id { | 85 | #undef TP_ARGS |
178 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | 86 | #define TP_ARGS(args...) args |
179 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | ||
180 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | ||
181 | }; | ||
182 | 87 | ||
183 | struct kmemtrace_alloc_entry { | 88 | #undef FTRACE_ENTRY_DUP |
184 | struct trace_entry ent; | 89 | #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) |
185 | enum kmemtrace_type_id type_id; | ||
186 | unsigned long call_site; | ||
187 | const void *ptr; | ||
188 | size_t bytes_req; | ||
189 | size_t bytes_alloc; | ||
190 | gfp_t gfp_flags; | ||
191 | int node; | ||
192 | }; | ||
193 | 90 | ||
194 | struct kmemtrace_free_entry { | 91 | #include "trace_entries.h" |
195 | struct trace_entry ent; | ||
196 | enum kmemtrace_type_id type_id; | ||
197 | unsigned long call_site; | ||
198 | const void *ptr; | ||
199 | }; | ||
200 | 92 | ||
93 | /* | ||
94 | * syscalls are special and need special handling; this is why | ||
95 | * they are not included in trace_entries.h | ||
96 | */ | ||
201 | struct syscall_trace_enter { | 97 | struct syscall_trace_enter { |
202 | struct trace_entry ent; | 98 | struct trace_entry ent; |
203 | int nr; | 99 | int nr; |
@@ -210,23 +106,12 @@ struct syscall_trace_exit { | |||
210 | unsigned long ret; | 106 | unsigned long ret; |
211 | }; | 107 | }; |
212 | 108 | ||
213 | #define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" | ||
214 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
215 | |||
216 | struct ksym_trace_entry { | ||
217 | struct trace_entry ent; | ||
218 | unsigned long ip; | ||
219 | unsigned char type; | ||
220 | char ksym_name[KSYM_NAME_LEN]; | ||
221 | char cmd[TASK_COMM_LEN]; | ||
222 | }; | ||
223 | |||
224 | /* | 109 | /* |
225 | * trace_flag_type is an enumeration that holds different | 110 | * trace_flag_type is an enumeration that holds different |
226 | * states when a trace occurs. These are: | 111 | * states when a trace occurs. These are: |
227 | * IRQS_OFF - interrupts were disabled | 112 | * IRQS_OFF - interrupts were disabled |
228 | * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags | 113 | * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags |
229 | * NEED_RESCED - reschedule is requested | 114 | * NEED_RESCHED - reschedule is requested |
230 | * HARDIRQ - inside an interrupt handler | 115 | * HARDIRQ - inside an interrupt handler |
231 | * SOFTIRQ - inside a softirq handler | 116 | * SOFTIRQ - inside a softirq handler |
232 | */ | 117 | */ |
@@ -325,7 +210,6 @@ extern void __ftrace_bad_type(void); | |||
325 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | 210 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ |
326 | TRACE_GRAPH_RET); \ | 211 | TRACE_GRAPH_RET); \ |
327 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | 212 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
328 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | ||
329 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | 213 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ |
330 | TRACE_KMEM_ALLOC); \ | 214 | TRACE_KMEM_ALLOC); \ |
331 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | 215 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ |
@@ -406,7 +290,6 @@ struct tracer { | |||
406 | struct tracer *next; | 290 | struct tracer *next; |
407 | int print_max; | 291 | int print_max; |
408 | struct tracer_flags *flags; | 292 | struct tracer_flags *flags; |
409 | struct tracer_stat *stats; | ||
410 | }; | 293 | }; |
411 | 294 | ||
412 | 295 | ||
@@ -485,6 +368,10 @@ void tracing_stop_sched_switch_record(void); | |||
485 | void tracing_start_sched_switch_record(void); | 368 | void tracing_start_sched_switch_record(void); |
486 | int register_tracer(struct tracer *type); | 369 | int register_tracer(struct tracer *type); |
487 | void unregister_tracer(struct tracer *type); | 370 | void unregister_tracer(struct tracer *type); |
371 | int is_tracing_stopped(void); | ||
372 | |||
373 | #define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" | ||
374 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
488 | 375 | ||
489 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 376 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
490 | 377 | ||
@@ -525,20 +412,6 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags, | |||
525 | 412 | ||
526 | extern cycle_t ftrace_now(int cpu); | 413 | extern cycle_t ftrace_now(int cpu); |
527 | 414 | ||
528 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER | ||
529 | typedef void | ||
530 | (*tracer_switch_func_t)(void *private, | ||
531 | void *__rq, | ||
532 | struct task_struct *prev, | ||
533 | struct task_struct *next); | ||
534 | |||
535 | struct tracer_switch_ops { | ||
536 | tracer_switch_func_t func; | ||
537 | void *private; | ||
538 | struct tracer_switch_ops *next; | ||
539 | }; | ||
540 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | ||
541 | |||
542 | extern void trace_find_cmdline(int pid, char comm[]); | 415 | extern void trace_find_cmdline(int pid, char comm[]); |
543 | 416 | ||
544 | #ifdef CONFIG_DYNAMIC_FTRACE | 417 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -621,10 +494,6 @@ static inline int ftrace_graph_addr(unsigned long addr) | |||
621 | return 0; | 494 | return 0; |
622 | } | 495 | } |
623 | #else | 496 | #else |
624 | static inline int ftrace_trace_addr(unsigned long addr) | ||
625 | { | ||
626 | return 1; | ||
627 | } | ||
628 | static inline int ftrace_graph_addr(unsigned long addr) | 497 | static inline int ftrace_graph_addr(unsigned long addr) |
629 | { | 498 | { |
630 | return 1; | 499 | return 1; |
@@ -638,12 +507,12 @@ print_graph_function(struct trace_iterator *iter) | |||
638 | } | 507 | } |
639 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 508 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
640 | 509 | ||
641 | extern struct pid *ftrace_pid_trace; | 510 | extern struct list_head ftrace_pids; |
642 | 511 | ||
643 | #ifdef CONFIG_FUNCTION_TRACER | 512 | #ifdef CONFIG_FUNCTION_TRACER |
644 | static inline int ftrace_trace_task(struct task_struct *task) | 513 | static inline int ftrace_trace_task(struct task_struct *task) |
645 | { | 514 | { |
646 | if (!ftrace_pid_trace) | 515 | if (list_empty(&ftrace_pids)) |
647 | return 1; | 516 | return 1; |
648 | 517 | ||
649 | return test_tsk_trace_trace(task); | 518 | return test_tsk_trace_trace(task); |
@@ -656,6 +525,41 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
656 | #endif | 525 | #endif |
657 | 526 | ||
658 | /* | 527 | /* |
528 | * struct trace_parser - helper for reading user input separated by spaces | ||
529 | * @cont: set if the input is not complete - no final space char was found | ||
530 | * @buffer: holds the parsed user input | ||
531 | * @idx: user input length | ||
532 | * @size: buffer size | ||
533 | */ | ||
534 | struct trace_parser { | ||
535 | bool cont; | ||
536 | char *buffer; | ||
537 | unsigned idx; | ||
538 | unsigned size; | ||
539 | }; | ||
540 | |||
541 | static inline bool trace_parser_loaded(struct trace_parser *parser) | ||
542 | { | ||
543 | return (parser->idx != 0); | ||
544 | } | ||
545 | |||
546 | static inline bool trace_parser_cont(struct trace_parser *parser) | ||
547 | { | ||
548 | return parser->cont; | ||
549 | } | ||
550 | |||
551 | static inline void trace_parser_clear(struct trace_parser *parser) | ||
552 | { | ||
553 | parser->cont = false; | ||
554 | parser->idx = 0; | ||
555 | } | ||
556 | |||
557 | extern int trace_parser_get_init(struct trace_parser *parser, int size); | ||
558 | extern void trace_parser_put(struct trace_parser *parser); | ||
559 | extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | ||
560 | size_t cnt, loff_t *ppos); | ||
561 | |||
562 | /* | ||
659 | * trace_iterator_flags is an enumeration that defines bit | 563 | * trace_iterator_flags is an enumeration that defines bit |
660 | * positions into trace_flags that controls the output. | 564 | * positions into trace_flags that controls the output. |
661 | * | 565 | * |
@@ -790,7 +694,6 @@ struct event_filter { | |||
790 | int n_preds; | 694 | int n_preds; |
791 | struct filter_pred **preds; | 695 | struct filter_pred **preds; |
792 | char *filter_string; | 696 | char *filter_string; |
793 | bool no_reset; | ||
794 | }; | 697 | }; |
795 | 698 | ||
796 | struct event_subsystem { | 699 | struct event_subsystem { |
@@ -802,22 +705,40 @@ struct event_subsystem { | |||
802 | }; | 705 | }; |
803 | 706 | ||
804 | struct filter_pred; | 707 | struct filter_pred; |
708 | struct regex; | ||
805 | 709 | ||
806 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, | 710 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, |
807 | int val1, int val2); | 711 | int val1, int val2); |
808 | 712 | ||
809 | struct filter_pred { | 713 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); |
810 | filter_pred_fn_t fn; | 714 | |
811 | u64 val; | 715 | enum regex_type { |
812 | char str_val[MAX_FILTER_STR_VAL]; | 716 | MATCH_FULL = 0, |
813 | int str_len; | 717 | MATCH_FRONT_ONLY, |
814 | char *field_name; | 718 | MATCH_MIDDLE_ONLY, |
815 | int offset; | 719 | MATCH_END_ONLY, |
816 | int not; | 720 | }; |
817 | int op; | 721 | |
818 | int pop_n; | 722 | struct regex { |
723 | char pattern[MAX_FILTER_STR_VAL]; | ||
724 | int len; | ||
725 | int field_len; | ||
726 | regex_match_func match; | ||
819 | }; | 727 | }; |
820 | 728 | ||
729 | struct filter_pred { | ||
730 | filter_pred_fn_t fn; | ||
731 | u64 val; | ||
732 | struct regex regex; | ||
733 | char *field_name; | ||
734 | int offset; | ||
735 | int not; | ||
736 | int op; | ||
737 | int pop_n; | ||
738 | }; | ||
739 | |||
740 | extern enum regex_type | ||
741 | filter_parse_regex(char *buff, int len, char **search, int *not); | ||
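
The enum names suggest the usual glob conventions — a bare token must match fully, a trailing '*' anchors the match at the front, a leading '*' at the end, and '*tok*' matches anywhere — though that reading is inferred from the names, not spelled out in this hunk. Hedged per-type matchers with the new regex_match_func signature could look like:

    /* Illustrative only; the in-tree matchers may differ in detail. */
    static int demo_match_full(char *str, struct regex *r, int len)
    {
            return strcmp(str, r->pattern) == 0;
    }

    static int demo_match_front(char *str, struct regex *r, int len)
    {
            return strncmp(str, r->pattern, r->len) == 0;
    }

    static int demo_match_middle(char *str, struct regex *r, int len)
    {
            return strstr(str, r->pattern) != NULL;
    }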
821 | extern void print_event_filter(struct ftrace_event_call *call, | 742 | extern void print_event_filter(struct ftrace_event_call *call, |
822 | struct trace_seq *s); | 743 | struct trace_seq *s); |
823 | extern int apply_event_filter(struct ftrace_event_call *call, | 744 | extern int apply_event_filter(struct ftrace_event_call *call, |
@@ -833,7 +754,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
833 | struct ring_buffer *buffer, | 754 | struct ring_buffer *buffer, |
834 | struct ring_buffer_event *event) | 755 | struct ring_buffer_event *event) |
835 | { | 756 | { |
836 | if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { | 757 | if (unlikely(call->filter_active) && |
758 | !filter_match_preds(call->filter, rec)) { | ||
837 | ring_buffer_discard_commit(buffer, event); | 759 | ring_buffer_discard_commit(buffer, event); |
838 | return 1; | 760 | return 1; |
839 | } | 761 | } |
@@ -841,58 +763,18 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
841 | return 0; | 763 | return 0; |
842 | } | 764 | } |
843 | 765 | ||
844 | #define DEFINE_COMPARISON_PRED(type) \ | ||
845 | static int filter_pred_##type(struct filter_pred *pred, void *event, \ | ||
846 | int val1, int val2) \ | ||
847 | { \ | ||
848 | type *addr = (type *)(event + pred->offset); \ | ||
849 | type val = (type)pred->val; \ | ||
850 | int match = 0; \ | ||
851 | \ | ||
852 | switch (pred->op) { \ | ||
853 | case OP_LT: \ | ||
854 | match = (*addr < val); \ | ||
855 | break; \ | ||
856 | case OP_LE: \ | ||
857 | match = (*addr <= val); \ | ||
858 | break; \ | ||
859 | case OP_GT: \ | ||
860 | match = (*addr > val); \ | ||
861 | break; \ | ||
862 | case OP_GE: \ | ||
863 | match = (*addr >= val); \ | ||
864 | break; \ | ||
865 | default: \ | ||
866 | break; \ | ||
867 | } \ | ||
868 | \ | ||
869 | return match; \ | ||
870 | } | ||
871 | |||
872 | #define DEFINE_EQUALITY_PRED(size) \ | ||
873 | static int filter_pred_##size(struct filter_pred *pred, void *event, \ | ||
874 | int val1, int val2) \ | ||
875 | { \ | ||
876 | u##size *addr = (u##size *)(event + pred->offset); \ | ||
877 | u##size val = (u##size)pred->val; \ | ||
878 | int match; \ | ||
879 | \ | ||
880 | match = (val == *addr) ^ pred->not; \ | ||
881 | \ | ||
882 | return match; \ | ||
883 | } | ||
884 | |||
885 | extern struct mutex event_mutex; | 766 | extern struct mutex event_mutex; |
886 | extern struct list_head ftrace_events; | 767 | extern struct list_head ftrace_events; |
887 | 768 | ||
888 | extern const char *__start___trace_bprintk_fmt[]; | 769 | extern const char *__start___trace_bprintk_fmt[]; |
889 | extern const char *__stop___trace_bprintk_fmt[]; | 770 | extern const char *__stop___trace_bprintk_fmt[]; |
890 | 771 | ||
891 | #undef TRACE_EVENT_FORMAT | 772 | #undef FTRACE_ENTRY |
892 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | 773 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
893 | extern struct ftrace_event_call event_##call; | 774 | extern struct ftrace_event_call event_##call; |
894 | #undef TRACE_EVENT_FORMAT_NOFILTER | 775 | #undef FTRACE_ENTRY_DUP |
895 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt) | 776 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ |
896 | #include "trace_event_types.h" | 777 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
778 | #include "trace_entries.h" | ||
897 | 779 | ||
898 | #endif /* _LINUX_KERNEL_TRACE_H */ | 780 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 19bfc75d467e..c21d5f3956ad 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -129,6 +129,7 @@ struct tracer boot_tracer __read_mostly = | |||
129 | 129 | ||
130 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | 130 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) |
131 | { | 131 | { |
132 | struct ftrace_event_call *call = &event_boot_call; | ||
132 | struct ring_buffer_event *event; | 133 | struct ring_buffer_event *event; |
133 | struct ring_buffer *buffer; | 134 | struct ring_buffer *buffer; |
134 | struct trace_boot_call *entry; | 135 | struct trace_boot_call *entry; |
@@ -150,13 +151,15 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
150 | goto out; | 151 | goto out; |
151 | entry = ring_buffer_event_data(event); | 152 | entry = ring_buffer_event_data(event); |
152 | entry->boot_call = *bt; | 153 | entry->boot_call = *bt; |
153 | trace_buffer_unlock_commit(buffer, event, 0, 0); | 154 | if (!filter_check_discard(call, entry, buffer, event)) |
155 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
154 | out: | 156 | out: |
155 | preempt_enable(); | 157 | preempt_enable(); |
156 | } | 158 | } |
157 | 159 | ||
158 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | 160 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) |
159 | { | 161 | { |
162 | struct ftrace_event_call *call = &event_boot_ret; | ||
160 | struct ring_buffer_event *event; | 163 | struct ring_buffer_event *event; |
161 | struct ring_buffer *buffer; | 164 | struct ring_buffer *buffer; |
162 | struct trace_boot_ret *entry; | 165 | struct trace_boot_ret *entry; |
@@ -175,7 +178,8 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | |||
175 | goto out; | 178 | goto out; |
176 | entry = ring_buffer_event_data(event); | 179 | entry = ring_buffer_event_data(event); |
177 | entry->boot_ret = *bt; | 180 | entry->boot_ret = *bt; |
178 | trace_buffer_unlock_commit(buffer, event, 0, 0); | 181 | if (!filter_check_discard(call, entry, buffer, event)) |
182 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
179 | out: | 183 | out: |
180 | preempt_enable(); | 184 | preempt_enable(); |
181 | } | 185 | } |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 7a7a9fd249a9..4a194f08f88c 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
34 | struct trace_array *tr = branch_tracer; | 34 | struct trace_array *tr = branch_tracer; |
35 | struct ring_buffer_event *event; | 35 | struct ring_buffer_event *event; |
36 | struct trace_branch *entry; | 36 | struct trace_branch *entry; |
37 | struct ring_buffer *buffer; | ||
37 | unsigned long flags; | 38 | unsigned long flags; |
38 | int cpu, pc; | 39 | int cpu, pc; |
39 | const char *p; | 40 | const char *p; |
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
54 | goto out; | 55 | goto out; |
55 | 56 | ||
56 | pc = preempt_count(); | 57 | pc = preempt_count(); |
57 | event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, | 58 | buffer = tr->buffer; |
59 | event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH, | ||
58 | sizeof(*entry), flags, pc); | 60 | sizeof(*entry), flags, pc); |
59 | if (!event) | 61 | if (!event) |
60 | goto out; | 62 | goto out; |
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
74 | entry->line = f->line; | 76 | entry->line = f->line; |
75 | entry->correct = val == expect; | 77 | entry->correct = val == expect; |
76 | 78 | ||
77 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 79 | if (!filter_check_discard(call, entry, buffer, event)) |
78 | ring_buffer_unlock_commit(tr->buffer, event); | 80 | ring_buffer_unlock_commit(buffer, event); |
79 | 81 | ||
80 | out: | 82 | out: |
81 | atomic_dec(&tr->data[cpu]->disabled); | 83 | atomic_dec(&tr->data[cpu]->disabled); |
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index b588fd81f7f9..20c5f92e28a8 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
@@ -66,10 +66,14 @@ u64 notrace trace_clock(void) | |||
66 | * Used by plugins that need globally coherent timestamps. | 66 | * Used by plugins that need globally coherent timestamps. |
67 | */ | 67 | */ |
68 | 68 | ||
69 | static u64 prev_trace_clock_time; | 69 | /* keep prev_time and lock in the same cacheline. */ |
70 | 70 | static struct { | |
71 | static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp = | 71 | u64 prev_time; |
72 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 72 | raw_spinlock_t lock; |
73 | } trace_clock_struct ____cacheline_aligned_in_smp = | ||
74 | { | ||
75 | .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, | ||
76 | }; | ||
73 | 77 | ||
74 | u64 notrace trace_clock_global(void) | 78 | u64 notrace trace_clock_global(void) |
75 | { | 79 | { |
@@ -88,19 +92,19 @@ u64 notrace trace_clock_global(void) | |||
88 | if (unlikely(in_nmi())) | 92 | if (unlikely(in_nmi())) |
89 | goto out; | 93 | goto out; |
90 | 94 | ||
91 | __raw_spin_lock(&trace_clock_lock); | 95 | __raw_spin_lock(&trace_clock_struct.lock); |
92 | 96 | ||
93 | /* | 97 | /* |
94 | * TODO: if this happens often then maybe we should reset | 98 | * TODO: if this happens often then maybe we should reset |
95 | * my_scd->clock to prev_trace_clock_time+1, to make sure | 99 | * my_scd->clock to prev_time+1, to make sure |
96 | * we start ticking with the local clock from now on? | 100 | * we start ticking with the local clock from now on? |
97 | */ | 101 | */ |
98 | if ((s64)(now - prev_trace_clock_time) < 0) | 102 | if ((s64)(now - trace_clock_struct.prev_time) < 0) |
99 | now = prev_trace_clock_time + 1; | 103 | now = trace_clock_struct.prev_time + 1; |
100 | 104 | ||
101 | prev_trace_clock_time = now; | 105 | trace_clock_struct.prev_time = now; |
102 | 106 | ||
103 | __raw_spin_unlock(&trace_clock_lock); | 107 | __raw_spin_unlock(&trace_clock_struct.lock); |
104 | 108 | ||
105 | out: | 109 | out: |
106 | raw_local_irq_restore(flags); | 110 | raw_local_irq_restore(flags); |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h new file mode 100644 index 000000000000..e19747d4f860 --- /dev/null +++ b/kernel/trace/trace_entries.h | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * This file defines the trace event structures that go into the ring | ||
3 | * buffer directly. They are created via macros so that changes for them | ||
4 | * appear in the format file. Using macros will automate this process. | ||
5 | * | ||
6 | * The macro used to create a ftrace data structure is: | ||
7 | * | ||
8 | * FTRACE_ENTRY( name, struct_name, id, structure, print ) | ||
9 | * | ||
10 | * @name: the name used as the event name, as well as the name of | ||
11 | * the directory that holds the format file. | ||
12 | * | ||
13 | * @struct_name: the name of the structure that is created. | ||
14 | * | ||
15 | * @id: The event identifier that is used to detect what event | ||
16 | * this is from the ring buffer. | ||
17 | * | ||
18 | * @structure: the structure layout | ||
19 | * | ||
20 | * - __field( type, item ) | ||
21 | * This is equivalent to declaring | ||
22 | * type item; | ||
23 | * in the structure. | ||
24 | * - __array( type, item, size ) | ||
25 | * This is equivalent to declaring | ||
26 | * type item[size]; | ||
27 | * in the structure. | ||
28 | * | ||
29 | * * for structures within structures, the format of the internal | ||
30 | * structure is laid out. This allows the internal structure | ||
31 | * to be deciphered for the format file. Although these macros | ||
32 | * may become out of sync with the internal structure, they | ||
33 | * will create a compile error if it happens. Since the | ||
34 | * internal structures are just tracing helpers, this is not | ||
35 | * an issue. | ||
36 | * | ||
37 | * When an internal structure is used, it should use: | ||
38 | * | ||
39 | * __field_struct( type, item ) | ||
40 | * | ||
41 | * instead of __field. This will prevent it from being shown in | ||
42 | * the output file. The fields in the structure should use: | ||
43 | * | ||
44 | * __field_desc( type, container, item ) | ||
45 | * __array_desc( type, container, item, len ) | ||
46 | * | ||
47 | * type, item and len are the same as __field and __array, but | ||
48 | * container is added. This is the name of the item in | ||
49 | * __field_struct that this is describing. | ||
50 | * | ||
51 | * | ||
52 | * @print: the print format shown to users in the format file. | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | * Function trace entry - function address and parent function address: | ||
57 | */ | ||
58 | FTRACE_ENTRY(function, ftrace_entry, | ||
59 | |||
60 | TRACE_FN, | ||
61 | |||
62 | F_STRUCT( | ||
63 | __field( unsigned long, ip ) | ||
64 | __field( unsigned long, parent_ip ) | ||
65 | ), | ||
66 | |||
67 | F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip) | ||
68 | ); | ||
69 | |||
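
Given the structure-pass macros added to trace.h above (__field emits a member, __field_desc and __array_desc emit nothing, and FTRACE_ENTRY wraps the fields behind a leading struct trace_entry ent), this first use expands to exactly the struct that was previously written by hand:

    struct ftrace_entry {
            struct trace_entry ent;
            unsigned long ip;
            unsigned long parent_ip;
    };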
70 | /* Function call entry */ | ||
71 | FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry, | ||
72 | |||
73 | TRACE_GRAPH_ENT, | ||
74 | |||
75 | F_STRUCT( | ||
76 | __field_struct( struct ftrace_graph_ent, graph_ent ) | ||
77 | __field_desc( unsigned long, graph_ent, func ) | ||
78 | __field_desc( int, graph_ent, depth ) | ||
79 | ), | ||
80 | |||
81 | F_printk("--> %lx (%d)", __entry->func, __entry->depth) | ||
82 | ); | ||
83 | |||
84 | /* Function return entry */ | ||
85 | FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry, | ||
86 | |||
87 | TRACE_GRAPH_RET, | ||
88 | |||
89 | F_STRUCT( | ||
90 | __field_struct( struct ftrace_graph_ret, ret ) | ||
91 | __field_desc( unsigned long, ret, func ) | ||
92 | __field_desc( unsigned long long, ret, calltime) | ||
93 | __field_desc( unsigned long long, ret, rettime ) | ||
94 | __field_desc( unsigned long, ret, overrun ) | ||
95 | __field_desc( int, ret, depth ) | ||
96 | ), | ||
97 | |||
98 | F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d", | ||
99 | __entry->func, __entry->depth, | ||
100 | __entry->calltime, __entry->rettime, | ||
101 | __entry->depth) | ||
102 | ); | ||
103 | |||
104 | /* | ||
105 | * Context switch trace entry - which task (and prio) we switched from/to: | ||
106 | * | ||
107 | * This is used for both wakeup and context switches. We only want | ||
108 | * to create one structure, but we need two outputs for it. | ||
109 | */ | ||
110 | #define FTRACE_CTX_FIELDS \ | ||
111 | __field( unsigned int, prev_pid ) \ | ||
112 | __field( unsigned char, prev_prio ) \ | ||
113 | __field( unsigned char, prev_state ) \ | ||
114 | __field( unsigned int, next_pid ) \ | ||
115 | __field( unsigned char, next_prio ) \ | ||
116 | __field( unsigned char, next_state ) \ | ||
117 | __field( unsigned int, next_cpu ) | ||
118 | |||
119 | FTRACE_ENTRY(context_switch, ctx_switch_entry, | ||
120 | |||
121 | TRACE_CTX, | ||
122 | |||
123 | F_STRUCT( | ||
124 | FTRACE_CTX_FIELDS | ||
125 | ), | ||
126 | |||
127 | F_printk("%u:%u:%u ==> %u:%u:%u [%03u]", | ||
128 | __entry->prev_pid, __entry->prev_prio, __entry->prev_state, | ||
129 | __entry->next_pid, __entry->next_prio, __entry->next_state, | ||
130 | __entry->next_cpu | ||
131 | ) | ||
132 | ); | ||
133 | |||
134 | /* | ||
135 | * FTRACE_ENTRY_DUP only creates the format file; it will not | ||
136 | * create another structure. | ||
137 | */ | ||
138 | FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry, | ||
139 | |||
140 | TRACE_WAKE, | ||
141 | |||
142 | F_STRUCT( | ||
143 | FTRACE_CTX_FIELDS | ||
144 | ), | ||
145 | |||
146 | F_printk("%u:%u:%u ==+ %u:%u:%u [%03u]", | ||
147 | __entry->prev_pid, __entry->prev_prio, __entry->prev_state, | ||
148 | __entry->next_pid, __entry->next_prio, __entry->next_state, | ||
149 | __entry->next_cpu | ||
150 | ) | ||
151 | ); | ||
152 | |||
153 | /* | ||
154 | * Special (free-form) trace entry: | ||
155 | */ | ||
156 | FTRACE_ENTRY(special, special_entry, | ||
157 | |||
158 | TRACE_SPECIAL, | ||
159 | |||
160 | F_STRUCT( | ||
161 | __field( unsigned long, arg1 ) | ||
162 | __field( unsigned long, arg2 ) | ||
163 | __field( unsigned long, arg3 ) | ||
164 | ), | ||
165 | |||
166 | F_printk("(%08lx) (%08lx) (%08lx)", | ||
167 | __entry->arg1, __entry->arg2, __entry->arg3) | ||
168 | ); | ||
169 | |||
170 | /* | ||
171 | * Stack-trace entry: | ||
172 | */ | ||
173 | |||
174 | #define FTRACE_STACK_ENTRIES 8 | ||
175 | |||
176 | FTRACE_ENTRY(kernel_stack, stack_entry, | ||
177 | |||
178 | TRACE_STACK, | ||
179 | |||
180 | F_STRUCT( | ||
181 | __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) | ||
182 | ), | ||
183 | |||
184 | F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" | ||
185 | "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", | ||
186 | __entry->caller[0], __entry->caller[1], __entry->caller[2], | ||
187 | __entry->caller[3], __entry->caller[4], __entry->caller[5], | ||
188 | __entry->caller[6], __entry->caller[7]) | ||
189 | ); | ||
190 | |||
191 | FTRACE_ENTRY(user_stack, userstack_entry, | ||
192 | |||
193 | TRACE_USER_STACK, | ||
194 | |||
195 | F_STRUCT( | ||
196 | __field( unsigned int, tgid ) | ||
197 | __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) | ||
198 | ), | ||
199 | |||
200 | F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" | ||
201 | "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", | ||
202 | __entry->caller[0], __entry->caller[1], __entry->caller[2], | ||
203 | __entry->caller[3], __entry->caller[4], __entry->caller[5], | ||
204 | __entry->caller[6], __entry->caller[7]) | ||
205 | ); | ||
206 | |||
207 | /* | ||
208 | * trace_printk entry: | ||
209 | */ | ||
210 | FTRACE_ENTRY(bprint, bprint_entry, | ||
211 | |||
212 | TRACE_BPRINT, | ||
213 | |||
214 | F_STRUCT( | ||
215 | __field( unsigned long, ip ) | ||
216 | __field( const char *, fmt ) | ||
217 | __dynamic_array( u32, buf ) | ||
218 | ), | ||
219 | |||
220 | F_printk("%08lx fmt:%p", | ||
221 | __entry->ip, __entry->fmt) | ||
222 | ); | ||
223 | |||
224 | FTRACE_ENTRY(print, print_entry, | ||
225 | |||
226 | TRACE_PRINT, | ||
227 | |||
228 | F_STRUCT( | ||
229 | __field( unsigned long, ip ) | ||
230 | __dynamic_array( char, buf ) | ||
231 | ), | ||
232 | |||
233 | F_printk("%08lx %s", | ||
234 | __entry->ip, __entry->buf) | ||
235 | ); | ||
236 | |||
237 | FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw, | ||
238 | |||
239 | TRACE_MMIO_RW, | ||
240 | |||
241 | F_STRUCT( | ||
242 | __field_struct( struct mmiotrace_rw, rw ) | ||
243 | __field_desc( resource_size_t, rw, phys ) | ||
244 | __field_desc( unsigned long, rw, value ) | ||
245 | __field_desc( unsigned long, rw, pc ) | ||
246 | __field_desc( int, rw, map_id ) | ||
247 | __field_desc( unsigned char, rw, opcode ) | ||
248 | __field_desc( unsigned char, rw, width ) | ||
249 | ), | ||
250 | |||
251 | F_printk("%lx %lx %lx %d %x %x", | ||
252 | (unsigned long)__entry->phys, __entry->value, __entry->pc, | ||
253 | __entry->map_id, __entry->opcode, __entry->width) | ||
254 | ); | ||
255 | |||
256 | FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, | ||
257 | |||
258 | TRACE_MMIO_MAP, | ||
259 | |||
260 | F_STRUCT( | ||
261 | __field_struct( struct mmiotrace_map, map ) | ||
262 | __field_desc( resource_size_t, map, phys ) | ||
263 | __field_desc( unsigned long, map, virt ) | ||
264 | __field_desc( unsigned long, map, len ) | ||
265 | __field_desc( int, map, map_id ) | ||
266 | __field_desc( unsigned char, map, opcode ) | ||
267 | ), | ||
268 | |||
269 | F_printk("%lx %lx %lx %d %x", | ||
270 | (unsigned long)__entry->phys, __entry->virt, __entry->len, | ||
271 | __entry->map_id, __entry->opcode) | ||
272 | ); | ||
273 | |||
274 | FTRACE_ENTRY(boot_call, trace_boot_call, | ||
275 | |||
276 | TRACE_BOOT_CALL, | ||
277 | |||
278 | F_STRUCT( | ||
279 | __field_struct( struct boot_trace_call, boot_call ) | ||
280 | __field_desc( pid_t, boot_call, caller ) | ||
281 | __array_desc( char, boot_call, func, KSYM_SYMBOL_LEN) | ||
282 | ), | ||
283 | |||
284 | F_printk("%d %s", __entry->caller, __entry->func) | ||
285 | ); | ||
286 | |||
287 | FTRACE_ENTRY(boot_ret, trace_boot_ret, | ||
288 | |||
289 | TRACE_BOOT_RET, | ||
290 | |||
291 | F_STRUCT( | ||
292 | __field_struct( struct boot_trace_ret, boot_ret ) | ||
293 | __array_desc( char, boot_ret, func, KSYM_SYMBOL_LEN) | ||
294 | __field_desc( int, boot_ret, result ) | ||
295 | __field_desc( unsigned long, boot_ret, duration ) | ||
296 | ), | ||
297 | |||
298 | F_printk("%s %d %lx", | ||
299 | __entry->func, __entry->result, __entry->duration) | ||
300 | ); | ||
301 | |||
302 | #define TRACE_FUNC_SIZE 30 | ||
303 | #define TRACE_FILE_SIZE 20 | ||
304 | |||
305 | FTRACE_ENTRY(branch, trace_branch, | ||
306 | |||
307 | TRACE_BRANCH, | ||
308 | |||
309 | F_STRUCT( | ||
310 | __field( unsigned int, line ) | ||
311 | __array( char, func, TRACE_FUNC_SIZE+1 ) | ||
312 | __array( char, file, TRACE_FILE_SIZE+1 ) | ||
313 | __field( char, correct ) | ||
314 | ), | ||
315 | |||
316 | F_printk("%u:%s:%s (%u)", | ||
317 | __entry->line, | ||
318 | __entry->func, __entry->file, __entry->correct) | ||
319 | ); | ||
320 | |||
321 | FTRACE_ENTRY(hw_branch, hw_branch_entry, | ||
322 | |||
323 | TRACE_HW_BRANCHES, | ||
324 | |||
325 | F_STRUCT( | ||
326 | __field( u64, from ) | ||
327 | __field( u64, to ) | ||
328 | ), | ||
329 | |||
330 | F_printk("from: %llx to: %llx", __entry->from, __entry->to) | ||
331 | ); | ||
332 | |||
333 | FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, | ||
334 | |||
335 | TRACE_KMEM_ALLOC, | ||
336 | |||
337 | F_STRUCT( | ||
338 | __field( enum kmemtrace_type_id, type_id ) | ||
339 | __field( unsigned long, call_site ) | ||
340 | __field( const void *, ptr ) | ||
341 | __field( size_t, bytes_req ) | ||
342 | __field( size_t, bytes_alloc ) | ||
343 | __field( gfp_t, gfp_flags ) | ||
344 | __field( int, node ) | ||
345 | ), | ||
346 | |||
347 | F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi" | ||
348 | " flags:%x node:%d", | ||
349 | __entry->type_id, __entry->call_site, __entry->ptr, | ||
350 | __entry->bytes_req, __entry->bytes_alloc, | ||
351 | __entry->gfp_flags, __entry->node) | ||
352 | ); | ||
353 | |||
354 | FTRACE_ENTRY(kmem_free, kmemtrace_free_entry, | ||
355 | |||
356 | TRACE_KMEM_FREE, | ||
357 | |||
358 | F_STRUCT( | ||
359 | __field( enum kmemtrace_type_id, type_id ) | ||
360 | __field( unsigned long, call_site ) | ||
361 | __field( const void *, ptr ) | ||
362 | ), | ||
363 | |||
364 | F_printk("type:%u call_site:%lx ptr:%p", | ||
365 | __entry->type_id, __entry->call_site, __entry->ptr) | ||
366 | ); | ||
367 | |||
368 | FTRACE_ENTRY(ksym_trace, ksym_trace_entry, | ||
369 | |||
370 | TRACE_KSYM, | ||
371 | |||
372 | F_STRUCT( | ||
373 | __field( unsigned long, ip ) | ||
374 | __field( unsigned char, type ) | ||
375 | __array( char , ksym_name, KSYM_NAME_LEN ) | ||
376 | __array( char , cmd, TASK_COMM_LEN ) | ||
377 | ), | ||
378 | |||
379 | F_printk("ip: %pF type: %d ksym_name: %s cmd: %s", | ||
380 | (void *)__entry->ip, (unsigned int)__entry->type, | ||
381 | __entry->ksym_name, __entry->cmd) | ||
382 | ); | ||
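The F_STRUCT()/__field()/__array() annotations above are not C by themselves; they are consumed by macro layers (see trace_export.c later in this diff) that stamp out real structures and format descriptions. As a hedged illustration, the ksym_trace entry hand-expands to roughly the struct below; the common trace header and both length constants are assumptions for the sketch, not the literal kernel definitions.

/* Hand-expanded sketch of the ksym_trace entry above; the common trace
 * header is elided and both length constants are assumed values. */
#define KSYM_NAME_LEN	128	/* assumption, for illustration only */
#define TASK_COMM_LEN	16	/* assumption, for illustration only */

struct ksym_trace_entry {
	/* struct trace_entry ent;  -- common header, elided here */
	unsigned long	ip;
	unsigned char	type;
	char		ksym_name[KSYM_NAME_LEN];
	char		cmd[TASK_COMM_LEN];
};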
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 11ba5bb4ed0a..8d5c171cc998 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -5,8 +5,65 @@ | |||
5 | * | 5 | * |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | ||
8 | #include "trace.h" | 9 | #include "trace.h" |
9 | 10 | ||
11 | /* | ||
12 | * alloc_percpu() takes a type, not a size, so create a dummy | ||
13 | * type that matches the desired size. | ||
14 | */ | ||
15 | typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t; | ||
16 | |||
17 | char *trace_profile_buf; | ||
18 | EXPORT_SYMBOL_GPL(trace_profile_buf); | ||
19 | |||
20 | char *trace_profile_buf_nmi; | ||
21 | EXPORT_SYMBOL_GPL(trace_profile_buf_nmi); | ||
22 | |||
23 | /* Count the events in use (per event id, not per instance) */ | ||
24 | static int total_profile_count; | ||
25 | |||
26 | static int ftrace_profile_enable_event(struct ftrace_event_call *event) | ||
27 | { | ||
28 | char *buf; | ||
29 | int ret = -ENOMEM; | ||
30 | |||
31 | if (atomic_inc_return(&event->profile_count)) | ||
32 | return 0; | ||
33 | |||
34 | if (!total_profile_count) { | ||
35 | buf = (char *)alloc_percpu(profile_buf_t); | ||
36 | if (!buf) | ||
37 | goto fail_buf; | ||
38 | |||
39 | rcu_assign_pointer(trace_profile_buf, buf); | ||
40 | |||
41 | buf = (char *)alloc_percpu(profile_buf_t); | ||
42 | if (!buf) | ||
43 | goto fail_buf_nmi; | ||
44 | |||
45 | rcu_assign_pointer(trace_profile_buf_nmi, buf); | ||
46 | } | ||
47 | |||
48 | ret = event->profile_enable(); | ||
49 | if (!ret) { | ||
50 | total_profile_count++; | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | fail_buf_nmi: | ||
55 | if (!total_profile_count) { | ||
56 | free_percpu(trace_profile_buf_nmi); | ||
57 | free_percpu(trace_profile_buf); | ||
58 | trace_profile_buf_nmi = NULL; | ||
59 | trace_profile_buf = NULL; | ||
60 | } | ||
61 | fail_buf: | ||
62 | atomic_dec(&event->profile_count); | ||
63 | |||
64 | return ret; | ||
65 | } | ||
66 | |||
10 | int ftrace_profile_enable(int event_id) | 67 | int ftrace_profile_enable(int event_id) |
11 | { | 68 | { |
12 | struct ftrace_event_call *event; | 69 | struct ftrace_event_call *event; |
@@ -14,8 +71,9 @@ int ftrace_profile_enable(int event_id) | |||
14 | 71 | ||
15 | mutex_lock(&event_mutex); | 72 | mutex_lock(&event_mutex); |
16 | list_for_each_entry(event, &ftrace_events, list) { | 73 | list_for_each_entry(event, &ftrace_events, list) { |
17 | if (event->id == event_id && event->profile_enable) { | 74 | if (event->id == event_id && event->profile_enable && |
18 | ret = event->profile_enable(event); | 75 | try_module_get(event->mod)) { |
76 | ret = ftrace_profile_enable_event(event); | ||
19 | break; | 77 | break; |
20 | } | 78 | } |
21 | } | 79 | } |
@@ -24,6 +82,33 @@ int ftrace_profile_enable(int event_id) | |||
24 | return ret; | 82 | return ret; |
25 | } | 83 | } |
26 | 84 | ||
85 | static void ftrace_profile_disable_event(struct ftrace_event_call *event) | ||
86 | { | ||
87 | char *buf, *nmi_buf; | ||
88 | |||
89 | if (!atomic_add_negative(-1, &event->profile_count)) | ||
90 | return; | ||
91 | |||
92 | event->profile_disable(); | ||
93 | |||
94 | if (!--total_profile_count) { | ||
95 | buf = trace_profile_buf; | ||
96 | rcu_assign_pointer(trace_profile_buf, NULL); | ||
97 | |||
98 | nmi_buf = trace_profile_buf_nmi; | ||
99 | rcu_assign_pointer(trace_profile_buf_nmi, NULL); | ||
100 | |||
101 | /* | ||
102 | * Ensure all events currently profiling have finished before | ||
103 | * releasing the buffers. | ||
104 | */ | ||
105 | synchronize_sched(); | ||
106 | |||
107 | free_percpu(buf); | ||
108 | free_percpu(nmi_buf); | ||
109 | } | ||
110 | } | ||
111 | |||
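The enable/disable pair above is the classic RCU publish/retire pattern for a shared per-cpu scratch buffer: publish with rcu_assign_pointer(), and at teardown unpublish first, wait for all in-flight users with synchronize_sched(), and only then free. A minimal kernel-style sketch of the reader and retire sides (hypothetical names, not a standalone program; readers run with preemption disabled, as tracepoint callbacks do):

/* Kernel-style sketch with hypothetical names; not a standalone program. */
static char *scratch_buf;	/* the published per-cpu buffer */

/* Reader side: runs in tracepoint context with preemption disabled,
 * which is exactly what synchronize_sched() below waits out. */
static void use_buffer(void)
{
	char *buf = rcu_dereference(scratch_buf);

	if (!buf)
		return;		/* torn down; nothing to do */
	/* use this CPU's slice: per_cpu_ptr(buf, smp_processor_id()) ... */
}

/* Teardown: unpublish first, wait for every reader, then free. */
static void retire_buffer(void)
{
	char *buf = scratch_buf;

	rcu_assign_pointer(scratch_buf, NULL);
	synchronize_sched();
	free_percpu(buf);
}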
27 | void ftrace_profile_disable(int event_id) | 112 | void ftrace_profile_disable(int event_id) |
28 | { | 113 | { |
29 | struct ftrace_event_call *event; | 114 | struct ftrace_event_call *event; |
@@ -31,7 +116,8 @@ void ftrace_profile_disable(int event_id) | |||
31 | mutex_lock(&event_mutex); | 116 | mutex_lock(&event_mutex); |
32 | list_for_each_entry(event, &ftrace_events, list) { | 117 | list_for_each_entry(event, &ftrace_events, list) { |
33 | if (event->id == event_id) { | 118 | if (event->id == event_id) { |
34 | event->profile_disable(event); | 119 | ftrace_profile_disable_event(event); |
120 | module_put(event->mod); | ||
35 | break; | 121 | break; |
36 | } | 122 | } |
37 | } | 123 | } |
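The try_module_get()/module_put() calls added above pin the module that owns the event for as long as profiling is active, so its callbacks cannot be unloaded mid-profile. A hedged fragment of the pairing (not the literal kernel code; enable_profiling() is a hypothetical helper):

/* Hedged fragment of the pin/unpin pairing; not the literal kernel code. */
if (event->profile_enable && try_module_get(event->mod)) {
	/* the module cannot unload while profiling is active */
	enable_profiling(event);	/* hypothetical helper */
}
/* ... later, from the disable path ... */
module_put(event->mod);			/* drop the pin */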
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h deleted file mode 100644 index 6db005e12487..000000000000 --- a/kernel/trace/trace_event_types.h +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM ftrace | ||
3 | |||
4 | /* | ||
5 | * We cheat and use the proto type field as the ID | ||
6 | * and args as the entry type (minus 'struct') | ||
7 | */ | ||
8 | TRACE_EVENT_FORMAT(function, TRACE_FN, ftrace_entry, ignore, | ||
9 | TRACE_STRUCT( | ||
10 | TRACE_FIELD(unsigned long, ip, ip) | ||
11 | TRACE_FIELD(unsigned long, parent_ip, parent_ip) | ||
12 | ), | ||
13 | TP_RAW_FMT(" %lx <-- %lx") | ||
14 | ); | ||
15 | |||
16 | TRACE_EVENT_FORMAT(funcgraph_entry, TRACE_GRAPH_ENT, | ||
17 | ftrace_graph_ent_entry, ignore, | ||
18 | TRACE_STRUCT( | ||
19 | TRACE_FIELD(unsigned long, graph_ent.func, func) | ||
20 | TRACE_FIELD(int, graph_ent.depth, depth) | ||
21 | ), | ||
22 | TP_RAW_FMT("--> %lx (%d)") | ||
23 | ); | ||
24 | |||
25 | TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET, | ||
26 | ftrace_graph_ret_entry, ignore, | ||
27 | TRACE_STRUCT( | ||
28 | TRACE_FIELD(unsigned long, ret.func, func) | ||
29 | TRACE_FIELD(unsigned long long, ret.calltime, calltime) | ||
30 | TRACE_FIELD(unsigned long long, ret.rettime, rettime) | ||
31 | TRACE_FIELD(unsigned long, ret.overrun, overrun) | ||
32 | TRACE_FIELD(int, ret.depth, depth) | ||
33 | ), | ||
34 | TP_RAW_FMT("<-- %lx (%d)") | ||
35 | ); | ||
36 | |||
37 | TRACE_EVENT_FORMAT(wakeup, TRACE_WAKE, ctx_switch_entry, ignore, | ||
38 | TRACE_STRUCT( | ||
39 | TRACE_FIELD(unsigned int, prev_pid, prev_pid) | ||
40 | TRACE_FIELD(unsigned char, prev_prio, prev_prio) | ||
41 | TRACE_FIELD(unsigned char, prev_state, prev_state) | ||
42 | TRACE_FIELD(unsigned int, next_pid, next_pid) | ||
43 | TRACE_FIELD(unsigned char, next_prio, next_prio) | ||
44 | TRACE_FIELD(unsigned char, next_state, next_state) | ||
45 | TRACE_FIELD(unsigned int, next_cpu, next_cpu) | ||
46 | ), | ||
47 | TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]") | ||
48 | ); | ||
49 | |||
50 | TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore, | ||
51 | TRACE_STRUCT( | ||
52 | TRACE_FIELD(unsigned int, prev_pid, prev_pid) | ||
53 | TRACE_FIELD(unsigned char, prev_prio, prev_prio) | ||
54 | TRACE_FIELD(unsigned char, prev_state, prev_state) | ||
55 | TRACE_FIELD(unsigned int, next_pid, next_pid) | ||
56 | TRACE_FIELD(unsigned char, next_prio, next_prio) | ||
57 | TRACE_FIELD(unsigned char, next_state, next_state) | ||
58 | TRACE_FIELD(unsigned int, next_cpu, next_cpu) | ||
59 | ), | ||
60 | TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]") | ||
61 | ); | ||
62 | |||
63 | TRACE_EVENT_FORMAT_NOFILTER(special, TRACE_SPECIAL, special_entry, ignore, | ||
64 | TRACE_STRUCT( | ||
65 | TRACE_FIELD(unsigned long, arg1, arg1) | ||
66 | TRACE_FIELD(unsigned long, arg2, arg2) | ||
67 | TRACE_FIELD(unsigned long, arg3, arg3) | ||
68 | ), | ||
69 | TP_RAW_FMT("(%08lx) (%08lx) (%08lx)") | ||
70 | ); | ||
71 | |||
72 | /* | ||
73 | * Stack-trace entry: | ||
74 | */ | ||
75 | |||
76 | /* #define FTRACE_STACK_ENTRIES 8 */ | ||
77 | |||
78 | TRACE_EVENT_FORMAT(kernel_stack, TRACE_STACK, stack_entry, ignore, | ||
79 | TRACE_STRUCT( | ||
80 | TRACE_FIELD(unsigned long, caller[0], stack0) | ||
81 | TRACE_FIELD(unsigned long, caller[1], stack1) | ||
82 | TRACE_FIELD(unsigned long, caller[2], stack2) | ||
83 | TRACE_FIELD(unsigned long, caller[3], stack3) | ||
84 | TRACE_FIELD(unsigned long, caller[4], stack4) | ||
85 | TRACE_FIELD(unsigned long, caller[5], stack5) | ||
86 | TRACE_FIELD(unsigned long, caller[6], stack6) | ||
87 | TRACE_FIELD(unsigned long, caller[7], stack7) | ||
88 | ), | ||
89 | TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" | ||
90 | "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n") | ||
91 | ); | ||
92 | |||
93 | TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore, | ||
94 | TRACE_STRUCT( | ||
95 | TRACE_FIELD(unsigned long, caller[0], stack0) | ||
96 | TRACE_FIELD(unsigned long, caller[1], stack1) | ||
97 | TRACE_FIELD(unsigned long, caller[2], stack2) | ||
98 | TRACE_FIELD(unsigned long, caller[3], stack3) | ||
99 | TRACE_FIELD(unsigned long, caller[4], stack4) | ||
100 | TRACE_FIELD(unsigned long, caller[5], stack5) | ||
101 | TRACE_FIELD(unsigned long, caller[6], stack6) | ||
102 | TRACE_FIELD(unsigned long, caller[7], stack7) | ||
103 | ), | ||
104 | TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" | ||
105 | "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n") | ||
106 | ); | ||
107 | |||
108 | TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore, | ||
109 | TRACE_STRUCT( | ||
110 | TRACE_FIELD(unsigned long, ip, ip) | ||
111 | TRACE_FIELD(char *, fmt, fmt) | ||
112 | TRACE_FIELD_ZERO_CHAR(buf) | ||
113 | ), | ||
114 | TP_RAW_FMT("%08lx (%d) fmt:%p %s") | ||
115 | ); | ||
116 | |||
117 | TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore, | ||
118 | TRACE_STRUCT( | ||
119 | TRACE_FIELD(unsigned long, ip, ip) | ||
120 | TRACE_FIELD_ZERO_CHAR(buf) | ||
121 | ), | ||
122 | TP_RAW_FMT("%08lx (%d) fmt:%p %s") | ||
123 | ); | ||
124 | |||
125 | TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore, | ||
126 | TRACE_STRUCT( | ||
127 | TRACE_FIELD(unsigned int, line, line) | ||
128 | TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, | ||
129 | TRACE_FUNC_SIZE+1, func) | ||
130 | TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, | ||
131 | TRACE_FUNC_SIZE+1, file) | ||
132 | TRACE_FIELD(char, correct, correct) | ||
133 | ), | ||
134 | TP_RAW_FMT("%u:%s:%s (%u)") | ||
135 | ); | ||
136 | |||
137 | TRACE_EVENT_FORMAT(hw_branch, TRACE_HW_BRANCHES, hw_branch_entry, ignore, | ||
138 | TRACE_STRUCT( | ||
139 | TRACE_FIELD(u64, from, from) | ||
140 | TRACE_FIELD(u64, to, to) | ||
141 | ), | ||
142 | TP_RAW_FMT("from: %llx to: %llx") | ||
143 | ); | ||
144 | |||
145 | TRACE_EVENT_FORMAT(power, TRACE_POWER, trace_power, ignore, | ||
146 | TRACE_STRUCT( | ||
147 | TRACE_FIELD_SIGN(ktime_t, state_data.stamp, stamp, 1) | ||
148 | TRACE_FIELD_SIGN(ktime_t, state_data.end, end, 1) | ||
149 | TRACE_FIELD(int, state_data.type, type) | ||
150 | TRACE_FIELD(int, state_data.state, state) | ||
151 | ), | ||
152 | TP_RAW_FMT("%llx->%llx type:%u state:%u") | ||
153 | ); | ||
154 | |||
155 | TRACE_EVENT_FORMAT(kmem_alloc, TRACE_KMEM_ALLOC, kmemtrace_alloc_entry, ignore, | ||
156 | TRACE_STRUCT( | ||
157 | TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id) | ||
158 | TRACE_FIELD(unsigned long, call_site, call_site) | ||
159 | TRACE_FIELD(const void *, ptr, ptr) | ||
160 | TRACE_FIELD(size_t, bytes_req, bytes_req) | ||
161 | TRACE_FIELD(size_t, bytes_alloc, bytes_alloc) | ||
162 | TRACE_FIELD(gfp_t, gfp_flags, gfp_flags) | ||
163 | TRACE_FIELD(int, node, node) | ||
164 | ), | ||
165 | TP_RAW_FMT("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu" | ||
166 | " flags:%x node:%d") | ||
167 | ); | ||
168 | |||
169 | TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore, | ||
170 | TRACE_STRUCT( | ||
171 | TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id) | ||
172 | TRACE_FIELD(unsigned long, call_site, call_site) | ||
173 | TRACE_FIELD(const void *, ptr, ptr) | ||
174 | ), | ||
175 | TP_RAW_FMT("type:%u call_site:%lx ptr:%p") | ||
176 | ); | ||
177 | |||
178 | #undef TRACE_SYSTEM | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 78b1ed230177..7c18d154ea28 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "trace_output.h" | 22 | #include "trace_output.h" |
23 | 23 | ||
24 | #undef TRACE_SYSTEM | ||
24 | #define TRACE_SYSTEM "TRACE_SYSTEM" | 25 | #define TRACE_SYSTEM "TRACE_SYSTEM" |
25 | 26 | ||
26 | DEFINE_MUTEX(event_mutex); | 27 | DEFINE_MUTEX(event_mutex); |
@@ -86,7 +87,7 @@ int trace_define_common_fields(struct ftrace_event_call *call) | |||
86 | __common_field(unsigned char, flags); | 87 | __common_field(unsigned char, flags); |
87 | __common_field(unsigned char, preempt_count); | 88 | __common_field(unsigned char, preempt_count); |
88 | __common_field(int, pid); | 89 | __common_field(int, pid); |
89 | __common_field(int, tgid); | 90 | __common_field(int, lock_depth); |
90 | 91 | ||
91 | return ret; | 92 | return ret; |
92 | } | 93 | } |
@@ -230,73 +231,38 @@ static ssize_t | |||
230 | ftrace_event_write(struct file *file, const char __user *ubuf, | 231 | ftrace_event_write(struct file *file, const char __user *ubuf, |
231 | size_t cnt, loff_t *ppos) | 232 | size_t cnt, loff_t *ppos) |
232 | { | 233 | { |
233 | size_t read = 0; | 234 | struct trace_parser parser; |
234 | int i, set = 1; | 235 | ssize_t read, ret; |
235 | ssize_t ret; | ||
236 | char *buf; | ||
237 | char ch; | ||
238 | 236 | ||
239 | if (!cnt || cnt < 0) | 237 | if (!cnt) |
240 | return 0; | 238 | return 0; |
241 | 239 | ||
242 | ret = tracing_update_buffers(); | 240 | ret = tracing_update_buffers(); |
243 | if (ret < 0) | 241 | if (ret < 0) |
244 | return ret; | 242 | return ret; |
245 | 243 | ||
246 | ret = get_user(ch, ubuf++); | 244 | if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) |
247 | if (ret) | ||
248 | return ret; | ||
249 | read++; | ||
250 | cnt--; | ||
251 | |||
252 | /* skip white space */ | ||
253 | while (cnt && isspace(ch)) { | ||
254 | ret = get_user(ch, ubuf++); | ||
255 | if (ret) | ||
256 | return ret; | ||
257 | read++; | ||
258 | cnt--; | ||
259 | } | ||
260 | |||
261 | /* Only white space found? */ | ||
262 | if (isspace(ch)) { | ||
263 | file->f_pos += read; | ||
264 | ret = read; | ||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL); | ||
269 | if (!buf) | ||
270 | return -ENOMEM; | 245 | return -ENOMEM; |
271 | 246 | ||
272 | if (cnt > EVENT_BUF_SIZE) | 247 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
273 | cnt = EVENT_BUF_SIZE; | 248 | |
249 | if (read >= 0 && trace_parser_loaded((&parser))) { | ||
250 | int set = 1; | ||
274 | 251 | ||
275 | i = 0; | 252 | if (*parser.buffer == '!') |
276 | while (cnt && !isspace(ch)) { | ||
277 | if (!i && ch == '!') | ||
278 | set = 0; | 253 | set = 0; |
279 | else | ||
280 | buf[i++] = ch; | ||
281 | 254 | ||
282 | ret = get_user(ch, ubuf++); | 255 | parser.buffer[parser.idx] = 0; |
256 | |||
257 | ret = ftrace_set_clr_event(parser.buffer + !set, set); | ||
283 | if (ret) | 258 | if (ret) |
284 | goto out_free; | 259 | goto out_put; |
285 | read++; | ||
286 | cnt--; | ||
287 | } | 260 | } |
288 | buf[i] = 0; | ||
289 | |||
290 | file->f_pos += read; | ||
291 | |||
292 | ret = ftrace_set_clr_event(buf, set); | ||
293 | if (ret) | ||
294 | goto out_free; | ||
295 | 261 | ||
296 | ret = read; | 262 | ret = read; |
297 | 263 | ||
298 | out_free: | 264 | out_put: |
299 | kfree(buf); | 265 | trace_parser_put(&parser); |
300 | 266 | ||
301 | return ret; | 267 | return ret; |
302 | } | 268 | } |
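The rewrite above drops the open-coded get_user() loop in favor of the trace_parser helpers, which centralize whitespace skipping and word accumulation for all trace control files. The calling pattern, mirrored from the code above as a kernel-style sketch (a fragment, not a full function):

/* Kernel-style sketch mirroring the rewrite above. */
struct trace_parser parser;
ssize_t read;

if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
	return -ENOMEM;			/* allocates parser.buffer */

read = trace_get_user(&parser, ubuf, cnt, ppos);	/* copy one word */
if (read >= 0 && trace_parser_loaded(&parser)) {
	parser.buffer[parser.idx] = 0;	/* NUL-terminate the word */
	/* ... act on parser.buffer ... */
}
trace_parser_put(&parser);		/* frees parser.buffer */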
@@ -304,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
304 | static void * | 270 | static void * |
305 | t_next(struct seq_file *m, void *v, loff_t *pos) | 271 | t_next(struct seq_file *m, void *v, loff_t *pos) |
306 | { | 272 | { |
307 | struct list_head *list = m->private; | 273 | struct ftrace_event_call *call = v; |
308 | struct ftrace_event_call *call; | ||
309 | 274 | ||
310 | (*pos)++; | 275 | (*pos)++; |
311 | 276 | ||
312 | for (;;) { | 277 | list_for_each_entry_continue(call, &ftrace_events, list) { |
313 | if (list == &ftrace_events) | ||
314 | return NULL; | ||
315 | |||
316 | call = list_entry(list, struct ftrace_event_call, list); | ||
317 | |||
318 | /* | 278 | /* |
319 | * The ftrace subsystem is for showing formats only. | 279 | * The ftrace subsystem is for showing formats only. |
320 | * They can not be enabled or disabled via the event files. | 280 | * They can not be enabled or disabled via the event files. |
321 | */ | 281 | */ |
322 | if (call->regfunc) | 282 | if (call->regfunc) |
323 | break; | 283 | return call; |
324 | |||
325 | list = list->next; | ||
326 | } | 284 | } |
327 | 285 | ||
328 | m->private = list->next; | 286 | return NULL; |
329 | |||
330 | return call; | ||
331 | } | 287 | } |
332 | 288 | ||
333 | static void *t_start(struct seq_file *m, loff_t *pos) | 289 | static void *t_start(struct seq_file *m, loff_t *pos) |
334 | { | 290 | { |
335 | struct ftrace_event_call *call = NULL; | 291 | struct ftrace_event_call *call; |
336 | loff_t l; | 292 | loff_t l; |
337 | 293 | ||
338 | mutex_lock(&event_mutex); | 294 | mutex_lock(&event_mutex); |
339 | 295 | ||
340 | m->private = ftrace_events.next; | 296 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
341 | for (l = 0; l <= *pos; ) { | 297 | for (l = 0; l <= *pos; ) { |
342 | call = t_next(m, NULL, &l); | 298 | call = t_next(m, call, &l); |
343 | if (!call) | 299 | if (!call) |
344 | break; | 300 | break; |
345 | } | 301 | } |
@@ -349,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
349 | static void * | 305 | static void * |
350 | s_next(struct seq_file *m, void *v, loff_t *pos) | 306 | s_next(struct seq_file *m, void *v, loff_t *pos) |
351 | { | 307 | { |
352 | struct list_head *list = m->private; | 308 | struct ftrace_event_call *call = v; |
353 | struct ftrace_event_call *call; | ||
354 | 309 | ||
355 | (*pos)++; | 310 | (*pos)++; |
356 | 311 | ||
357 | retry: | 312 | list_for_each_entry_continue(call, &ftrace_events, list) { |
358 | if (list == &ftrace_events) | 313 | if (call->enabled) |
359 | return NULL; | 314 | return call; |
360 | |||
361 | call = list_entry(list, struct ftrace_event_call, list); | ||
362 | |||
363 | if (!call->enabled) { | ||
364 | list = list->next; | ||
365 | goto retry; | ||
366 | } | 315 | } |
367 | 316 | ||
368 | m->private = list->next; | 317 | return NULL; |
369 | |||
370 | return call; | ||
371 | } | 318 | } |
372 | 319 | ||
373 | static void *s_start(struct seq_file *m, loff_t *pos) | 320 | static void *s_start(struct seq_file *m, loff_t *pos) |
374 | { | 321 | { |
375 | struct ftrace_event_call *call = NULL; | 322 | struct ftrace_event_call *call; |
376 | loff_t l; | 323 | loff_t l; |
377 | 324 | ||
378 | mutex_lock(&event_mutex); | 325 | mutex_lock(&event_mutex); |
379 | 326 | ||
380 | m->private = ftrace_events.next; | 327 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
381 | for (l = 0; l <= *pos; ) { | 328 | for (l = 0; l <= *pos; ) { |
382 | call = s_next(m, NULL, &l); | 329 | call = s_next(m, call, &l); |
383 | if (!call) | 330 | if (!call) |
384 | break; | 331 | break; |
385 | } | 332 | } |
@@ -560,7 +507,7 @@ extern char *__bad_type_size(void); | |||
560 | #define FIELD(type, name) \ | 507 | #define FIELD(type, name) \ |
561 | sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ | 508 | sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ |
562 | #type, "common_" #name, offsetof(typeof(field), name), \ | 509 | #type, "common_" #name, offsetof(typeof(field), name), \ |
563 | sizeof(field.name) | 510 | sizeof(field.name), is_signed_type(type) |
564 | 511 | ||
565 | static int trace_write_header(struct trace_seq *s) | 512 | static int trace_write_header(struct trace_seq *s) |
566 | { | 513 | { |
@@ -568,17 +515,17 @@ static int trace_write_header(struct trace_seq *s) | |||
568 | 515 | ||
569 | /* struct trace_entry */ | 516 | /* struct trace_entry */ |
570 | return trace_seq_printf(s, | 517 | return trace_seq_printf(s, |
571 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 518 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
572 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 519 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
573 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 520 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
574 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 521 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
575 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 522 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" |
576 | "\n", | 523 | "\n", |
577 | FIELD(unsigned short, type), | 524 | FIELD(unsigned short, type), |
578 | FIELD(unsigned char, flags), | 525 | FIELD(unsigned char, flags), |
579 | FIELD(unsigned char, preempt_count), | 526 | FIELD(unsigned char, preempt_count), |
580 | FIELD(int, pid), | 527 | FIELD(int, pid), |
581 | FIELD(int, tgid)); | 528 | FIELD(int, lock_depth)); |
582 | } | 529 | } |
583 | 530 | ||
584 | static ssize_t | 531 | static ssize_t |
@@ -931,9 +878,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events) | |||
931 | "'%s/filter' entry\n", name); | 878 | "'%s/filter' entry\n", name); |
932 | } | 879 | } |
933 | 880 | ||
934 | entry = trace_create_file("enable", 0644, system->entry, | 881 | trace_create_file("enable", 0644, system->entry, |
935 | (void *)system->name, | 882 | (void *)system->name, |
936 | &ftrace_system_enable_fops); | 883 | &ftrace_system_enable_fops); |
937 | 884 | ||
938 | return system->entry; | 885 | return system->entry; |
939 | } | 886 | } |
@@ -945,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
945 | const struct file_operations *filter, | 892 | const struct file_operations *filter, |
946 | const struct file_operations *format) | 893 | const struct file_operations *format) |
947 | { | 894 | { |
948 | struct dentry *entry; | ||
949 | int ret; | 895 | int ret; |
950 | 896 | ||
951 | /* | 897 | /* |
@@ -963,12 +909,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
963 | } | 909 | } |
964 | 910 | ||
965 | if (call->regfunc) | 911 | if (call->regfunc) |
966 | entry = trace_create_file("enable", 0644, call->dir, call, | 912 | trace_create_file("enable", 0644, call->dir, call, |
967 | enable); | 913 | enable); |
968 | 914 | ||
969 | if (call->id && call->profile_enable) | 915 | if (call->id && call->profile_enable) |
970 | entry = trace_create_file("id", 0444, call->dir, call, | 916 | trace_create_file("id", 0444, call->dir, call, |
971 | id); | 917 | id); |
972 | 918 | ||
973 | if (call->define_fields) { | 919 | if (call->define_fields) { |
974 | ret = call->define_fields(call); | 920 | ret = call->define_fields(call); |
@@ -977,16 +923,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
977 | " events/%s\n", call->name); | 923 | " events/%s\n", call->name); |
978 | return ret; | 924 | return ret; |
979 | } | 925 | } |
980 | entry = trace_create_file("filter", 0644, call->dir, call, | 926 | trace_create_file("filter", 0644, call->dir, call, |
981 | filter); | 927 | filter); |
982 | } | 928 | } |
983 | 929 | ||
984 | /* A trace may not want to export its format */ | 930 | /* A trace may not want to export its format */ |
985 | if (!call->show_format) | 931 | if (!call->show_format) |
986 | return 0; | 932 | return 0; |
987 | 933 | ||
988 | entry = trace_create_file("format", 0444, call->dir, call, | 934 | trace_create_file("format", 0444, call->dir, call, |
989 | format); | 935 | format); |
990 | 936 | ||
991 | return 0; | 937 | return 0; |
992 | } | 938 | } |
@@ -1187,7 +1133,7 @@ static int trace_module_notify(struct notifier_block *self, | |||
1187 | } | 1133 | } |
1188 | #endif /* CONFIG_MODULES */ | 1134 | #endif /* CONFIG_MODULES */ |
1189 | 1135 | ||
1190 | struct notifier_block trace_module_nb = { | 1136 | static struct notifier_block trace_module_nb = { |
1191 | .notifier_call = trace_module_notify, | 1137 | .notifier_call = trace_module_notify, |
1192 | .priority = 0, | 1138 | .priority = 0, |
1193 | }; | 1139 | }; |
@@ -1359,6 +1305,18 @@ static __init void event_trace_self_tests(void) | |||
1359 | if (!call->regfunc) | 1305 | if (!call->regfunc) |
1360 | continue; | 1306 | continue; |
1361 | 1307 | ||
1308 | /* | ||
1309 | * Testing syscall events here is pretty useless, but we | ||
1310 | * still do it if configured. It is time consuming, though: | ||
1311 | * what we really need is a user thread performing the | ||
1312 | * syscalls as we test. | ||
1313 | */ | ||
1314 | #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS | ||
1315 | if (call->system && | ||
1316 | strcmp(call->system, "syscalls") == 0) | ||
1317 | continue; | ||
1318 | #endif | ||
1319 | |||
1362 | pr_info("Testing event %s: ", call->name); | 1320 | pr_info("Testing event %s: ", call->name); |
1363 | 1321 | ||
1364 | /* | 1322 | /* |
@@ -1432,7 +1390,7 @@ static __init void event_trace_self_tests(void) | |||
1432 | 1390 | ||
1433 | #ifdef CONFIG_FUNCTION_TRACER | 1391 | #ifdef CONFIG_FUNCTION_TRACER |
1434 | 1392 | ||
1435 | static DEFINE_PER_CPU(atomic_t, test_event_disable); | 1393 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
1436 | 1394 | ||
1437 | static void | 1395 | static void |
1438 | function_test_events_call(unsigned long ip, unsigned long parent_ip) | 1396 | function_test_events_call(unsigned long ip, unsigned long parent_ip) |
@@ -1449,7 +1407,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1449 | pc = preempt_count(); | 1407 | pc = preempt_count(); |
1450 | resched = ftrace_preempt_disable(); | 1408 | resched = ftrace_preempt_disable(); |
1451 | cpu = raw_smp_processor_id(); | 1409 | cpu = raw_smp_processor_id(); |
1452 | disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu)); | 1410 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
1453 | 1411 | ||
1454 | if (disabled != 1) | 1412 | if (disabled != 1) |
1455 | goto out; | 1413 | goto out; |
@@ -1468,7 +1426,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1468 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); | 1426 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); |
1469 | 1427 | ||
1470 | out: | 1428 | out: |
1471 | atomic_dec(&per_cpu(test_event_disable, cpu)); | 1429 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
1472 | ftrace_preempt_enable(resched); | 1430 | ftrace_preempt_enable(resched); |
1473 | } | 1431 | } |
1474 | 1432 | ||
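The renamed per-cpu counter implements the standard recursion guard seen in the function above: each CPU increments its own counter and only the outermost entry (counter == 1) does any work, so a callback that retriggers tracing on the same CPU cannot recurse. Its skeleton, as a kernel-style sketch with hypothetical names:

static DEFINE_PER_CPU(atomic_t, my_trace_disable);	/* hypothetical */

static void traced_callback(void)
{
	int cpu;

	/* preemption is already disabled here, as in the caller above */
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&per_cpu(my_trace_disable, cpu)) != 1)
		goto out;	/* re-entered on this CPU: skip the body */

	/* ... record the event exactly once per outermost entry ... */
out:
	atomic_dec(&per_cpu(my_trace_disable, cpu));
}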
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 93660fbbf629..21d34757b955 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -18,11 +18,10 @@ | |||
18 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | 18 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/module.h> | 21 | #include <linux/module.h> |
24 | #include <linux/ctype.h> | 22 | #include <linux/ctype.h> |
25 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/perf_event.h> | ||
26 | 25 | ||
27 | #include "trace.h" | 26 | #include "trace.h" |
28 | #include "trace_output.h" | 27 | #include "trace_output.h" |
@@ -31,6 +30,7 @@ enum filter_op_ids | |||
31 | { | 30 | { |
32 | OP_OR, | 31 | OP_OR, |
33 | OP_AND, | 32 | OP_AND, |
33 | OP_GLOB, | ||
34 | OP_NE, | 34 | OP_NE, |
35 | OP_EQ, | 35 | OP_EQ, |
36 | OP_LT, | 36 | OP_LT, |
@@ -48,16 +48,17 @@ struct filter_op { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct filter_op filter_ops[] = { | 50 | static struct filter_op filter_ops[] = { |
51 | { OP_OR, "||", 1 }, | 51 | { OP_OR, "||", 1 }, |
52 | { OP_AND, "&&", 2 }, | 52 | { OP_AND, "&&", 2 }, |
53 | { OP_NE, "!=", 4 }, | 53 | { OP_GLOB, "~", 4 }, |
54 | { OP_EQ, "==", 4 }, | 54 | { OP_NE, "!=", 4 }, |
55 | { OP_LT, "<", 5 }, | 55 | { OP_EQ, "==", 4 }, |
56 | { OP_LE, "<=", 5 }, | 56 | { OP_LT, "<", 5 }, |
57 | { OP_GT, ">", 5 }, | 57 | { OP_LE, "<=", 5 }, |
58 | { OP_GE, ">=", 5 }, | 58 | { OP_GT, ">", 5 }, |
59 | { OP_NONE, "OP_NONE", 0 }, | 59 | { OP_GE, ">=", 5 }, |
60 | { OP_OPEN_PAREN, "(", 0 }, | 60 | { OP_NONE, "OP_NONE", 0 }, |
61 | { OP_OPEN_PAREN, "(", 0 }, | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | enum { | 64 | enum { |
@@ -121,6 +122,47 @@ struct filter_parse_state { | |||
121 | } operand; | 122 | } operand; |
122 | }; | 123 | }; |
123 | 124 | ||
125 | #define DEFINE_COMPARISON_PRED(type) \ | ||
126 | static int filter_pred_##type(struct filter_pred *pred, void *event, \ | ||
127 | int val1, int val2) \ | ||
128 | { \ | ||
129 | type *addr = (type *)(event + pred->offset); \ | ||
130 | type val = (type)pred->val; \ | ||
131 | int match = 0; \ | ||
132 | \ | ||
133 | switch (pred->op) { \ | ||
134 | case OP_LT: \ | ||
135 | match = (*addr < val); \ | ||
136 | break; \ | ||
137 | case OP_LE: \ | ||
138 | match = (*addr <= val); \ | ||
139 | break; \ | ||
140 | case OP_GT: \ | ||
141 | match = (*addr > val); \ | ||
142 | break; \ | ||
143 | case OP_GE: \ | ||
144 | match = (*addr >= val); \ | ||
145 | break; \ | ||
146 | default: \ | ||
147 | break; \ | ||
148 | } \ | ||
149 | \ | ||
150 | return match; \ | ||
151 | } | ||
152 | |||
153 | #define DEFINE_EQUALITY_PRED(size) \ | ||
154 | static int filter_pred_##size(struct filter_pred *pred, void *event, \ | ||
155 | int val1, int val2) \ | ||
156 | { \ | ||
157 | u##size *addr = (u##size *)(event + pred->offset); \ | ||
158 | u##size val = (u##size)pred->val; \ | ||
159 | int match; \ | ||
160 | \ | ||
161 | match = (val == *addr) ^ pred->not; \ | ||
162 | \ | ||
163 | return match; \ | ||
164 | } | ||
165 | |||
124 | DEFINE_COMPARISON_PRED(s64); | 166 | DEFINE_COMPARISON_PRED(s64); |
125 | DEFINE_COMPARISON_PRED(u64); | 167 | DEFINE_COMPARISON_PRED(u64); |
126 | DEFINE_COMPARISON_PRED(s32); | 168 | DEFINE_COMPARISON_PRED(s32); |
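Each DEFINE_COMPARISON_PRED(type) line stamps out one type-specialized predicate. Expanded for s64, the generated function is essentially the userspace model below; the reduced struct filter_pred and the enum values are illustrative assumptions (val1/val2 are unused but kept to mirror the kernel signature):

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

enum { OP_LT, OP_LE, OP_GT, OP_GE };	/* values illustrative */

/* Reduced to the fields the predicate actually reads. */
struct filter_pred {
	int		op;
	unsigned int	offset;	/* field offset inside the event record */
	uint64_t	val;	/* constant operand */
};

/* What DEFINE_COMPARISON_PRED(s64) expands to, modulo formatting. */
static int filter_pred_s64(struct filter_pred *pred, void *event,
			   int val1, int val2)
{
	s64 *addr = (s64 *)((char *)event + pred->offset);
	s64 val = (s64)pred->val;

	switch (pred->op) {
	case OP_LT:	return *addr < val;
	case OP_LE:	return *addr <= val;
	case OP_GT:	return *addr > val;
	case OP_GE:	return *addr >= val;
	default:	return 0;
	}
}

int main(void)
{
	struct filter_pred pred = { .op = OP_GT, .offset = 0, .val = 10 };
	s64 field = 42;

	printf("%d\n", filter_pred_s64(&pred, &field, 0, 0));	/* 1 */
	return 0;
}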
@@ -156,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event, | |||
156 | char *addr = (char *)(event + pred->offset); | 198 | char *addr = (char *)(event + pred->offset); |
157 | int cmp, match; | 199 | int cmp, match; |
158 | 200 | ||
159 | cmp = strncmp(addr, pred->str_val, pred->str_len); | 201 | cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); |
160 | 202 | ||
161 | match = (!cmp) ^ pred->not; | 203 | match = cmp ^ pred->not; |
162 | 204 | ||
163 | return match; | 205 | return match; |
164 | } | 206 | } |
@@ -170,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, | |||
170 | char **addr = (char **)(event + pred->offset); | 212 | char **addr = (char **)(event + pred->offset); |
171 | int cmp, match; | 213 | int cmp, match; |
172 | 214 | ||
173 | cmp = strncmp(*addr, pred->str_val, pred->str_len); | 215 | cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); |
174 | 216 | ||
175 | match = (!cmp) ^ pred->not; | 217 | match = cmp ^ pred->not; |
176 | 218 | ||
177 | return match; | 219 | return match; |
178 | } | 220 | } |
@@ -196,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event, | |||
196 | char *addr = (char *)(event + str_loc); | 238 | char *addr = (char *)(event + str_loc); |
197 | int cmp, match; | 239 | int cmp, match; |
198 | 240 | ||
199 | cmp = strncmp(addr, pred->str_val, str_len); | 241 | cmp = pred->regex.match(addr, &pred->regex, str_len); |
200 | 242 | ||
201 | match = (!cmp) ^ pred->not; | 243 | match = cmp ^ pred->not; |
202 | 244 | ||
203 | return match; | 245 | return match; |
204 | } | 246 | } |
@@ -209,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event, | |||
209 | return 0; | 251 | return 0; |
210 | } | 252 | } |
211 | 253 | ||
254 | /* Basic regex callbacks */ | ||
255 | static int regex_match_full(char *str, struct regex *r, int len) | ||
256 | { | ||
257 | if (strncmp(str, r->pattern, len) == 0) | ||
258 | return 1; | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | static int regex_match_front(char *str, struct regex *r, int len) | ||
263 | { | ||
264 | if (strncmp(str, r->pattern, len) == 0) | ||
265 | return 1; | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int regex_match_middle(char *str, struct regex *r, int len) | ||
270 | { | ||
271 | if (strstr(str, r->pattern)) | ||
272 | return 1; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int regex_match_end(char *str, struct regex *r, int len) | ||
277 | { | ||
278 | char *ptr = strstr(str, r->pattern); | ||
279 | |||
280 | if (ptr && (ptr[r->len] == 0)) | ||
281 | return 1; | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * filter_parse_regex - parse a basic regex | ||
287 | * @buff: the raw regex | ||
288 | * @len: length of the regex | ||
289 | * @search: will point to the beginning of the string to compare | ||
290 | * @not: tell whether the match will have to be inverted | ||
291 | * | ||
292 | * The buffer passed in contains a basic glob-style pattern; this | ||
293 | * function sets *search to point at the part of the buffer to | ||
294 | * compare against and returns the type of match required (see | ||
295 | * the enum above). Note that buff is modified in place. | ||
296 | * | ||
297 | * Returns enum type. | ||
298 | * search returns the pointer to use for comparison. | ||
299 | * not returns 1 if buff started with a '!' | ||
300 | * 0 otherwise. | ||
301 | */ | ||
302 | enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) | ||
303 | { | ||
304 | int type = MATCH_FULL; | ||
305 | int i; | ||
306 | |||
307 | if (buff[0] == '!') { | ||
308 | *not = 1; | ||
309 | buff++; | ||
310 | len--; | ||
311 | } else | ||
312 | *not = 0; | ||
313 | |||
314 | *search = buff; | ||
315 | |||
316 | for (i = 0; i < len; i++) { | ||
317 | if (buff[i] == '*') { | ||
318 | if (!i) { | ||
319 | *search = buff + 1; | ||
320 | type = MATCH_END_ONLY; | ||
321 | } else { | ||
322 | if (type == MATCH_END_ONLY) | ||
323 | type = MATCH_MIDDLE_ONLY; | ||
324 | else | ||
325 | type = MATCH_FRONT_ONLY; | ||
326 | buff[i] = 0; | ||
327 | break; | ||
328 | } | ||
329 | } | ||
330 | } | ||
331 | |||
332 | return type; | ||
333 | } | ||
334 | |||
335 | static void filter_build_regex(struct filter_pred *pred) | ||
336 | { | ||
337 | struct regex *r = &pred->regex; | ||
338 | char *search; | ||
339 | enum regex_type type = MATCH_FULL; | ||
340 | int not = 0; | ||
341 | |||
342 | if (pred->op == OP_GLOB) { | ||
343 | type = filter_parse_regex(r->pattern, r->len, &search, ¬); | ||
344 | r->len = strlen(search); | ||
345 | memmove(r->pattern, search, r->len+1); | ||
346 | } | ||
347 | |||
348 | switch (type) { | ||
349 | case MATCH_FULL: | ||
350 | r->match = regex_match_full; | ||
351 | break; | ||
352 | case MATCH_FRONT_ONLY: | ||
353 | r->match = regex_match_front; | ||
354 | break; | ||
355 | case MATCH_MIDDLE_ONLY: | ||
356 | r->match = regex_match_middle; | ||
357 | break; | ||
358 | case MATCH_END_ONLY: | ||
359 | r->match = regex_match_end; | ||
360 | break; | ||
361 | } | ||
362 | |||
363 | pred->not ^= not; | ||
364 | } | ||
365 | |||
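filter_parse_regex() understands only a single '*' plus an optional leading '!', and filter_build_regex() then picks the matching callback. The pattern-to-type mapping is easiest to see concretely; below is a standalone userspace model of the classification logic (a sketch mirroring the kernel loop, not the kernel function itself):

#include <stdio.h>
#include <string.h>

enum regex_type {
	MATCH_FULL,		/* plain strncmp */
	MATCH_FRONT_ONLY,	/* "abc*"  */
	MATCH_MIDDLE_ONLY,	/* "*abc*" */
	MATCH_END_ONLY,		/* "*abc"  */
};

/* Mirrors the kernel loop above: a leading '*' means match-at-end,
 * a trailing '*' means match-at-front, both mean match-anywhere. */
static enum regex_type classify(char *buff, char **search, int *not)
{
	enum regex_type type = MATCH_FULL;
	int len = strlen(buff);
	int i;

	*not = 0;
	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	}
	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] != '*')
			continue;
		if (!i) {
			*search = buff + 1;
			type = MATCH_END_ONLY;
		} else {
			type = (type == MATCH_END_ONLY) ?
				MATCH_MIDDLE_ONLY : MATCH_FRONT_ONLY;
			buff[i] = 0;
			break;
		}
	}
	return type;
}

int main(void)
{
	char p1[] = "sys_*", p2[] = "*_lock", p3[] = "*page*", p4[] = "!idle";
	char *s;
	int not;

	printf("%d %s\n", classify(p1, &s, &not), s); /* 1 sys_  (FRONT)  */
	printf("%d %s\n", classify(p2, &s, &not), s); /* 3 _lock (END)    */
	printf("%d %s\n", classify(p3, &s, &not), s); /* 2 page  (MIDDLE) */
	printf("%d %s\n", classify(p4, &s, &not), s); /* 0 idle  (FULL)   */
	return 0;
}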
212 | /* return 1 if event matches, 0 otherwise (discard) */ | 366 | /* return 1 if event matches, 0 otherwise (discard) */ |
213 | int filter_match_preds(struct ftrace_event_call *call, void *rec) | 367 | int filter_match_preds(struct event_filter *filter, void *rec) |
214 | { | 368 | { |
215 | struct event_filter *filter = call->filter; | ||
216 | int match, top = 0, val1 = 0, val2 = 0; | 369 | int match, top = 0, val1 = 0, val2 = 0; |
217 | int stack[MAX_FILTER_PRED]; | 370 | int stack[MAX_FILTER_PRED]; |
218 | struct filter_pred *pred; | 371 | struct filter_pred *pred; |
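filter_match_preds(), whose body falls outside this hunk, evaluates the predicate array in postfix order with an explicit stack: operand predicates push their match result, and OP_AND/OP_OR pop two values and push the combination. A standalone userspace model of that evaluation (token names are hypothetical):

#include <stdio.h>

enum { VAL0, VAL1, AND, OR };	/* token kinds, illustrative */

/* Evaluate a postfix boolean program, as filter_match_preds() does:
 * operands push their result, AND/OR pop two and push the combination. */
static int eval_postfix(const int *prog, int n)
{
	int stack[16];
	int top = 0, i, a, b;

	for (i = 0; i < n; i++) {
		switch (prog[i]) {
		case VAL0:
			stack[top++] = 0;
			break;
		case VAL1:
			stack[top++] = 1;
			break;
		case AND:
			b = stack[--top];
			a = stack[--top];
			stack[top++] = a && b;
			break;
		case OR:
			b = stack[--top];
			a = stack[--top];
			stack[top++] = a || b;
			break;
		}
	}
	return stack[top - 1];
}

int main(void)
{
	/* (1 && 0) || 1 == 1, in postfix: 1 0 && 1 || */
	const int prog[] = { VAL1, VAL0, AND, VAL1, OR };

	printf("%d\n", eval_postfix(prog, 5));	/* prints 1 */
	return 0;
}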
@@ -355,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred) | |||
355 | { | 508 | { |
356 | kfree(pred->field_name); | 509 | kfree(pred->field_name); |
357 | pred->field_name = NULL; | 510 | pred->field_name = NULL; |
358 | pred->str_len = 0; | 511 | pred->regex.len = 0; |
359 | } | 512 | } |
360 | 513 | ||
361 | static int filter_set_pred(struct filter_pred *dest, | 514 | static int filter_set_pred(struct filter_pred *dest, |
@@ -385,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call) | |||
385 | filter->preds[i]->fn = filter_pred_none; | 538 | filter->preds[i]->fn = filter_pred_none; |
386 | } | 539 | } |
387 | 540 | ||
388 | void destroy_preds(struct ftrace_event_call *call) | 541 | static void __free_preds(struct event_filter *filter) |
389 | { | 542 | { |
390 | struct event_filter *filter = call->filter; | ||
391 | int i; | 543 | int i; |
392 | 544 | ||
393 | if (!filter) | 545 | if (!filter) |
@@ -400,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call) | |||
400 | kfree(filter->preds); | 552 | kfree(filter->preds); |
401 | kfree(filter->filter_string); | 553 | kfree(filter->filter_string); |
402 | kfree(filter); | 554 | kfree(filter); |
555 | } | ||
556 | |||
557 | void destroy_preds(struct ftrace_event_call *call) | ||
558 | { | ||
559 | __free_preds(call->filter); | ||
403 | call->filter = NULL; | 560 | call->filter = NULL; |
561 | call->filter_active = 0; | ||
404 | } | 562 | } |
405 | 563 | ||
406 | static int init_preds(struct ftrace_event_call *call) | 564 | static struct event_filter *__alloc_preds(void) |
407 | { | 565 | { |
408 | struct event_filter *filter; | 566 | struct event_filter *filter; |
409 | struct filter_pred *pred; | 567 | struct filter_pred *pred; |
410 | int i; | 568 | int i; |
411 | 569 | ||
412 | if (call->filter) | 570 | filter = kzalloc(sizeof(*filter), GFP_KERNEL); |
413 | return 0; | 571 | if (!filter) |
414 | 572 | return ERR_PTR(-ENOMEM); | |
415 | filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); | ||
416 | if (!call->filter) | ||
417 | return -ENOMEM; | ||
418 | 573 | ||
419 | filter->n_preds = 0; | 574 | filter->n_preds = 0; |
420 | 575 | ||
@@ -430,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call) | |||
430 | filter->preds[i] = pred; | 585 | filter->preds[i] = pred; |
431 | } | 586 | } |
432 | 587 | ||
433 | return 0; | 588 | return filter; |
434 | 589 | ||
435 | oom: | 590 | oom: |
436 | destroy_preds(call); | 591 | __free_preds(filter); |
592 | return ERR_PTR(-ENOMEM); | ||
593 | } | ||
437 | 594 | ||
438 | return -ENOMEM; | 595 | static int init_preds(struct ftrace_event_call *call) |
596 | { | ||
597 | if (call->filter) | ||
598 | return 0; | ||
599 | |||
600 | call->filter_active = 0; | ||
601 | call->filter = __alloc_preds(); | ||
602 | if (IS_ERR(call->filter)) | ||
603 | return PTR_ERR(call->filter); | ||
604 | |||
605 | return 0; | ||
439 | } | 606 | } |
440 | 607 | ||
441 | static int init_subsystem_preds(struct event_subsystem *system) | 608 | static int init_subsystem_preds(struct event_subsystem *system) |
@@ -458,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system) | |||
458 | return 0; | 625 | return 0; |
459 | } | 626 | } |
460 | 627 | ||
461 | enum { | 628 | static void filter_free_subsystem_preds(struct event_subsystem *system) |
462 | FILTER_DISABLE_ALL, | ||
463 | FILTER_INIT_NO_RESET, | ||
464 | FILTER_SKIP_NO_RESET, | ||
465 | }; | ||
466 | |||
467 | static void filter_free_subsystem_preds(struct event_subsystem *system, | ||
468 | int flag) | ||
469 | { | 629 | { |
470 | struct ftrace_event_call *call; | 630 | struct ftrace_event_call *call; |
471 | 631 | ||
@@ -476,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
476 | if (strcmp(call->system, system->name) != 0) | 636 | if (strcmp(call->system, system->name) != 0) |
477 | continue; | 637 | continue; |
478 | 638 | ||
479 | if (flag == FILTER_INIT_NO_RESET) { | ||
480 | call->filter->no_reset = false; | ||
481 | continue; | ||
482 | } | ||
483 | |||
484 | if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) | ||
485 | continue; | ||
486 | |||
487 | filter_disable_preds(call); | 639 | filter_disable_preds(call); |
488 | remove_filter_string(call->filter); | 640 | remove_filter_string(call->filter); |
489 | } | 641 | } |
@@ -491,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, | |||
491 | 643 | ||
492 | static int filter_add_pred_fn(struct filter_parse_state *ps, | 644 | static int filter_add_pred_fn(struct filter_parse_state *ps, |
493 | struct ftrace_event_call *call, | 645 | struct ftrace_event_call *call, |
646 | struct event_filter *filter, | ||
494 | struct filter_pred *pred, | 647 | struct filter_pred *pred, |
495 | filter_pred_fn_t fn) | 648 | filter_pred_fn_t fn) |
496 | { | 649 | { |
497 | struct event_filter *filter = call->filter; | ||
498 | int idx, err; | 650 | int idx, err; |
499 | 651 | ||
500 | if (filter->n_preds == MAX_FILTER_PRED) { | 652 | if (filter->n_preds == MAX_FILTER_PRED) { |
@@ -509,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps, | |||
509 | return err; | 661 | return err; |
510 | 662 | ||
511 | filter->n_preds++; | 663 | filter->n_preds++; |
512 | call->filter_active = 1; | ||
513 | 664 | ||
514 | return 0; | 665 | return 0; |
515 | } | 666 | } |
@@ -534,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field) | |||
534 | 685 | ||
535 | static int is_legal_op(struct ftrace_event_field *field, int op) | 686 | static int is_legal_op(struct ftrace_event_field *field, int op) |
536 | { | 687 | { |
537 | if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) | 688 | if (is_string_field(field) && |
689 | (op != OP_EQ && op != OP_NE && op != OP_GLOB)) | ||
690 | return 0; | ||
691 | if (!is_string_field(field) && op == OP_GLOB) | ||
538 | return 0; | 692 | return 0; |
539 | 693 | ||
540 | return 1; | 694 | return 1; |
@@ -585,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size, | |||
585 | 739 | ||
586 | static int filter_add_pred(struct filter_parse_state *ps, | 740 | static int filter_add_pred(struct filter_parse_state *ps, |
587 | struct ftrace_event_call *call, | 741 | struct ftrace_event_call *call, |
742 | struct event_filter *filter, | ||
588 | struct filter_pred *pred, | 743 | struct filter_pred *pred, |
589 | bool dry_run) | 744 | bool dry_run) |
590 | { | 745 | { |
@@ -619,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
619 | } | 774 | } |
620 | 775 | ||
621 | if (is_string_field(field)) { | 776 | if (is_string_field(field)) { |
622 | pred->str_len = field->size; | 777 | filter_build_regex(pred); |
623 | 778 | ||
624 | if (field->filter_type == FILTER_STATIC_STRING) | 779 | if (field->filter_type == FILTER_STATIC_STRING) { |
625 | fn = filter_pred_string; | 780 | fn = filter_pred_string; |
626 | else if (field->filter_type == FILTER_DYN_STRING) | 781 | pred->regex.field_len = field->size; |
782 | } else if (field->filter_type == FILTER_DYN_STRING) | ||
627 | fn = filter_pred_strloc; | 783 | fn = filter_pred_strloc; |
628 | else { | 784 | else { |
629 | fn = filter_pred_pchar; | 785 | fn = filter_pred_pchar; |
630 | pred->str_len = strlen(pred->str_val); | 786 | pred->regex.field_len = strlen(pred->regex.pattern); |
631 | } | 787 | } |
632 | } else { | 788 | } else { |
633 | if (field->is_signed) | 789 | if (field->is_signed) |
634 | ret = strict_strtoll(pred->str_val, 0, &val); | 790 | ret = strict_strtoll(pred->regex.pattern, 0, &val); |
635 | else | 791 | else |
636 | ret = strict_strtoull(pred->str_val, 0, &val); | 792 | ret = strict_strtoull(pred->regex.pattern, 0, &val); |
637 | if (ret) { | 793 | if (ret) { |
638 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); | 794 | parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); |
639 | return -EINVAL; | 795 | return -EINVAL; |
@@ -653,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
653 | 809 | ||
654 | add_pred_fn: | 810 | add_pred_fn: |
655 | if (!dry_run) | 811 | if (!dry_run) |
656 | return filter_add_pred_fn(ps, call, pred, fn); | 812 | return filter_add_pred_fn(ps, call, filter, pred, fn); |
657 | return 0; | ||
658 | } | ||
659 | |||
660 | static int filter_add_subsystem_pred(struct filter_parse_state *ps, | ||
661 | struct event_subsystem *system, | ||
662 | struct filter_pred *pred, | ||
663 | char *filter_string, | ||
664 | bool dry_run) | ||
665 | { | ||
666 | struct ftrace_event_call *call; | ||
667 | int err = 0; | ||
668 | bool fail = true; | ||
669 | |||
670 | list_for_each_entry(call, &ftrace_events, list) { | ||
671 | |||
672 | if (!call->define_fields) | ||
673 | continue; | ||
674 | |||
675 | if (strcmp(call->system, system->name)) | ||
676 | continue; | ||
677 | |||
678 | if (call->filter->no_reset) | ||
679 | continue; | ||
680 | |||
681 | err = filter_add_pred(ps, call, pred, dry_run); | ||
682 | if (err) | ||
683 | call->filter->no_reset = true; | ||
684 | else | ||
685 | fail = false; | ||
686 | |||
687 | if (!dry_run) | ||
688 | replace_filter_string(call->filter, filter_string); | ||
689 | } | ||
690 | |||
691 | if (fail) { | ||
692 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | ||
693 | return err; | ||
694 | } | ||
695 | return 0; | 813 | return 0; |
696 | } | 814 | } |
697 | 815 | ||
@@ -892,8 +1010,9 @@ static void postfix_clear(struct filter_parse_state *ps) | |||
892 | 1010 | ||
893 | while (!list_empty(&ps->postfix)) { | 1011 | while (!list_empty(&ps->postfix)) { |
894 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); | 1012 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); |
895 | kfree(elt->operand); | ||
896 | list_del(&elt->list); | 1013 | list_del(&elt->list); |
1014 | kfree(elt->operand); | ||
1015 | kfree(elt); | ||
897 | } | 1016 | } |
898 | } | 1017 | } |
899 | 1018 | ||
@@ -1003,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2) | |||
1003 | return NULL; | 1122 | return NULL; |
1004 | } | 1123 | } |
1005 | 1124 | ||
1006 | strcpy(pred->str_val, operand2); | 1125 | strcpy(pred->regex.pattern, operand2); |
1007 | pred->str_len = strlen(operand2); | 1126 | pred->regex.len = strlen(pred->regex.pattern); |
1008 | 1127 | ||
1009 | pred->op = op; | 1128 | pred->op = op; |
1010 | 1129 | ||
@@ -1048,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps) | |||
1048 | return 0; | 1167 | return 0; |
1049 | } | 1168 | } |
1050 | 1169 | ||
1051 | static int replace_preds(struct event_subsystem *system, | 1170 | static int replace_preds(struct ftrace_event_call *call, |
1052 | struct ftrace_event_call *call, | 1171 | struct event_filter *filter, |
1053 | struct filter_parse_state *ps, | 1172 | struct filter_parse_state *ps, |
1054 | char *filter_string, | 1173 | char *filter_string, |
1055 | bool dry_run) | 1174 | bool dry_run) |
@@ -1096,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system, | |||
1096 | add_pred: | 1215 | add_pred: |
1097 | if (!pred) | 1216 | if (!pred) |
1098 | return -ENOMEM; | 1217 | return -ENOMEM; |
1099 | if (call) | 1218 | err = filter_add_pred(ps, call, filter, pred, dry_run); |
1100 | err = filter_add_pred(ps, call, pred, false); | ||
1101 | else | ||
1102 | err = filter_add_subsystem_pred(ps, system, pred, | ||
1103 | filter_string, dry_run); | ||
1104 | filter_free_pred(pred); | 1219 | filter_free_pred(pred); |
1105 | if (err) | 1220 | if (err) |
1106 | return err; | 1221 | return err; |
@@ -1111,10 +1226,50 @@ add_pred: | |||
1111 | return 0; | 1226 | return 0; |
1112 | } | 1227 | } |
1113 | 1228 | ||
1114 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | 1229 | static int replace_system_preds(struct event_subsystem *system, |
1230 | struct filter_parse_state *ps, | ||
1231 | char *filter_string) | ||
1115 | { | 1232 | { |
1233 | struct event_filter *filter = system->filter; | ||
1234 | struct ftrace_event_call *call; | ||
1235 | bool fail = true; | ||
1116 | int err; | 1236 | int err; |
1117 | 1237 | ||
1238 | list_for_each_entry(call, &ftrace_events, list) { | ||
1239 | |||
1240 | if (!call->define_fields) | ||
1241 | continue; | ||
1242 | |||
1243 | if (strcmp(call->system, system->name) != 0) | ||
1244 | continue; | ||
1245 | |||
1246 | /* try to see if the filter can be applied */ | ||
1247 | err = replace_preds(call, filter, ps, filter_string, true); | ||
1248 | if (err) | ||
1249 | continue; | ||
1250 | |||
1251 | /* really apply the filter */ | ||
1252 | filter_disable_preds(call); | ||
1253 | err = replace_preds(call, filter, ps, filter_string, false); | ||
1254 | if (err) | ||
1255 | filter_disable_preds(call); | ||
1256 | else { | ||
1257 | call->filter_active = 1; | ||
1258 | replace_filter_string(filter, filter_string); | ||
1259 | } | ||
1260 | fail = false; | ||
1261 | } | ||
1262 | |||
1263 | if (fail) { | ||
1264 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | ||
1265 | return -EINVAL; | ||
1266 | } | ||
1267 | return 0; | ||
1268 | } | ||
1269 | |||
1270 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | ||
1271 | { | ||
1272 | int err; | ||
1118 | struct filter_parse_state *ps; | 1273 | struct filter_parse_state *ps; |
1119 | 1274 | ||
1120 | mutex_lock(&event_mutex); | 1275 | mutex_lock(&event_mutex); |
@@ -1126,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1126 | if (!strcmp(strstrip(filter_string), "0")) { | 1281 | if (!strcmp(strstrip(filter_string), "0")) { |
1127 | filter_disable_preds(call); | 1282 | filter_disable_preds(call); |
1128 | remove_filter_string(call->filter); | 1283 | remove_filter_string(call->filter); |
1129 | mutex_unlock(&event_mutex); | 1284 | goto out_unlock; |
1130 | return 0; | ||
1131 | } | 1285 | } |
1132 | 1286 | ||
1133 | err = -ENOMEM; | 1287 | err = -ENOMEM; |
@@ -1145,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1145 | goto out; | 1299 | goto out; |
1146 | } | 1300 | } |
1147 | 1301 | ||
1148 | err = replace_preds(NULL, call, ps, filter_string, false); | 1302 | err = replace_preds(call, call->filter, ps, filter_string, false); |
1149 | if (err) | 1303 | if (err) |
1150 | append_filter_err(ps, call->filter); | 1304 | append_filter_err(ps, call->filter); |
1151 | 1305 | else | |
1306 | call->filter_active = 1; | ||
1152 | out: | 1307 | out: |
1153 | filter_opstack_clear(ps); | 1308 | filter_opstack_clear(ps); |
1154 | postfix_clear(ps); | 1309 | postfix_clear(ps); |
@@ -1163,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1163 | char *filter_string) | 1318 | char *filter_string) |
1164 | { | 1319 | { |
1165 | int err; | 1320 | int err; |
1166 | |||
1167 | struct filter_parse_state *ps; | 1321 | struct filter_parse_state *ps; |
1168 | 1322 | ||
1169 | mutex_lock(&event_mutex); | 1323 | mutex_lock(&event_mutex); |
@@ -1173,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1173 | goto out_unlock; | 1327 | goto out_unlock; |
1174 | 1328 | ||
1175 | if (!strcmp(strstrip(filter_string), "0")) { | 1329 | if (!strcmp(strstrip(filter_string), "0")) { |
1176 | filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); | 1330 | filter_free_subsystem_preds(system); |
1177 | remove_filter_string(system->filter); | 1331 | remove_filter_string(system->filter); |
1178 | mutex_unlock(&event_mutex); | 1332 | goto out_unlock; |
1179 | return 0; | ||
1180 | } | 1333 | } |
1181 | 1334 | ||
1182 | err = -ENOMEM; | 1335 | err = -ENOMEM; |
@@ -1193,31 +1346,87 @@ int apply_subsystem_event_filter(struct event_subsystem *system, | |||
1193 | goto out; | 1346 | goto out; |
1194 | } | 1347 | } |
1195 | 1348 | ||
1196 | filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); | 1349 | err = replace_system_preds(system, ps, filter_string); |
1197 | 1350 | if (err) | |
1198 | /* try to see the filter can be applied to which events */ | ||
1199 | err = replace_preds(system, NULL, ps, filter_string, true); | ||
1200 | if (err) { | ||
1201 | append_filter_err(ps, system->filter); | 1351 | append_filter_err(ps, system->filter); |
1202 | goto out; | 1352 | |
1353 | out: | ||
1354 | filter_opstack_clear(ps); | ||
1355 | postfix_clear(ps); | ||
1356 | kfree(ps); | ||
1357 | out_unlock: | ||
1358 | mutex_unlock(&event_mutex); | ||
1359 | |||
1360 | return err; | ||
1361 | } | ||
1362 | |||
1363 | #ifdef CONFIG_EVENT_PROFILE | ||
1364 | |||
1365 | void ftrace_profile_free_filter(struct perf_event *event) | ||
1366 | { | ||
1367 | struct event_filter *filter = event->filter; | ||
1368 | |||
1369 | event->filter = NULL; | ||
1370 | __free_preds(filter); | ||
1371 | } | ||
1372 | |||
1373 | int ftrace_profile_set_filter(struct perf_event *event, int event_id, | ||
1374 | char *filter_str) | ||
1375 | { | ||
1376 | int err; | ||
1377 | struct event_filter *filter; | ||
1378 | struct filter_parse_state *ps; | ||
1379 | struct ftrace_event_call *call = NULL; | ||
1380 | |||
1381 | mutex_lock(&event_mutex); | ||
1382 | |||
1383 | list_for_each_entry(call, &ftrace_events, list) { | ||
1384 | if (call->id == event_id) | ||
1385 | break; | ||
1203 | } | 1386 | } |
1204 | 1387 | ||
1205 | filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); | 1388 | err = -EINVAL; |
1389 | if (!call) | ||
1390 | goto out_unlock; | ||
1391 | |||
1392 | err = -EEXIST; | ||
1393 | if (event->filter) | ||
1394 | goto out_unlock; | ||
1206 | 1395 | ||
1207 | /* really apply the filter to the events */ | 1396 | filter = __alloc_preds(); |
1208 | err = replace_preds(system, NULL, ps, filter_string, false); | 1397 | if (IS_ERR(filter)) { |
1209 | if (err) { | 1398 | err = PTR_ERR(filter); |
1210 | append_filter_err(ps, system->filter); | 1399 | goto out_unlock; |
1211 | filter_free_subsystem_preds(system, 2); | ||
1212 | } | 1400 | } |
1213 | 1401 | ||
1214 | out: | 1402 | err = -ENOMEM; |
1403 | ps = kzalloc(sizeof(*ps), GFP_KERNEL); | ||
1404 | if (!ps) | ||
1405 | goto free_preds; | ||
1406 | |||
1407 | parse_init(ps, filter_ops, filter_str); | ||
1408 | err = filter_parse(ps); | ||
1409 | if (err) | ||
1410 | goto free_ps; | ||
1411 | |||
1412 | err = replace_preds(call, filter, ps, filter_str, false); | ||
1413 | if (!err) | ||
1414 | event->filter = filter; | ||
1415 | |||
1416 | free_ps: | ||
1215 | filter_opstack_clear(ps); | 1417 | filter_opstack_clear(ps); |
1216 | postfix_clear(ps); | 1418 | postfix_clear(ps); |
1217 | kfree(ps); | 1419 | kfree(ps); |
1420 | |||
1421 | free_preds: | ||
1422 | if (err) | ||
1423 | __free_preds(filter); | ||
1424 | |||
1218 | out_unlock: | 1425 | out_unlock: |
1219 | mutex_unlock(&event_mutex); | 1426 | mutex_unlock(&event_mutex); |
1220 | 1427 | ||
1221 | return err; | 1428 | return err; |
1222 | } | 1429 | } |
1223 | 1430 | ||
1431 | #endif /* CONFIG_EVENT_PROFILE */ | ||
1432 | |||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index df1bf6e48bb9..31da218ee10f 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -15,146 +15,128 @@ | |||
15 | 15 | ||
16 | #include "trace_output.h" | 16 | #include "trace_output.h" |
17 | 17 | ||
18 | #undef TRACE_SYSTEM | ||
19 | #define TRACE_SYSTEM ftrace | ||
18 | 20 | ||
19 | #undef TRACE_STRUCT | 21 | /* not needed for this file */ |
20 | #define TRACE_STRUCT(args...) args | 22 | #undef __field_struct |
23 | #define __field_struct(type, item) | ||
21 | 24 | ||
22 | extern void __bad_type_size(void); | 25 | #undef __field |
26 | #define __field(type, item) type item; | ||
23 | 27 | ||
24 | #undef TRACE_FIELD | 28 | #undef __field_desc |
25 | #define TRACE_FIELD(type, item, assign) \ | 29 | #define __field_desc(type, container, item) type item; |
26 | if (sizeof(type) != sizeof(field.item)) \ | 30 | |
27 | __bad_type_size(); \ | 31 | #undef __array |
32 | #define __array(type, item, size) type item[size]; | ||
33 | |||
34 | #undef __array_desc | ||
35 | #define __array_desc(type, container, item, size) type item[size]; | ||
36 | |||
37 | #undef __dynamic_array | ||
38 | #define __dynamic_array(type, item) type item[]; | ||
39 | |||
40 | #undef F_STRUCT | ||
41 | #define F_STRUCT(args...) args | ||
42 | |||
43 | #undef F_printk | ||
44 | #define F_printk(fmt, args...) fmt, args | ||
45 | |||
46 | #undef FTRACE_ENTRY | ||
47 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ | ||
48 | struct ____ftrace_##name { \ | ||
49 | tstruct \ | ||
50 | }; \ | ||
51 | static void __used ____ftrace_check_##name(void) \ | ||
52 | { \ | ||
53 | struct ____ftrace_##name *__entry = NULL; \ | ||
54 | \ | ||
55 | /* force compile-time check on F_printk() */ \ | ||
56 | printk(print); \ | ||
57 | } | ||
58 | |||
59 | #undef FTRACE_ENTRY_DUP | ||
60 | #define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \ | ||
61 | FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) | ||
62 | |||
63 | #include "trace_entries.h" | ||
64 | |||
65 | |||
66 | #undef __field | ||
67 | #define __field(type, item) \ | ||
28 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | 68 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
29 | "offset:%u;\tsize:%u;\n", \ | 69 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
30 | (unsigned int)offsetof(typeof(field), item), \ | 70 | offsetof(typeof(field), item), \ |
31 | (unsigned int)sizeof(field.item)); \ | 71 | sizeof(field.item), is_signed_type(type)); \ |
32 | if (!ret) \ | 72 | if (!ret) \ |
33 | return 0; | 73 | return 0; |
34 | 74 | ||
75 | #undef __field_desc | ||
76 | #define __field_desc(type, container, item) \ | ||
77 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | ||
78 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ | ||
79 | offsetof(typeof(field), container.item), \ | ||
80 | sizeof(field.container.item), \ | ||
81 | is_signed_type(type)); \ | ||
82 | if (!ret) \ | ||
83 | return 0; | ||
35 | 84 | ||
36 | #undef TRACE_FIELD_SPECIAL | 85 | #undef __array |
37 | #define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \ | 86 | #define __array(type, item, len) \ |
38 | ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \ | 87 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
39 | "offset:%u;\tsize:%u;\n", \ | 88 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
40 | (unsigned int)offsetof(typeof(field), item), \ | 89 | offsetof(typeof(field), item), \ |
41 | (unsigned int)sizeof(field.item)); \ | 90 | sizeof(field.item), is_signed_type(type)); \ |
42 | if (!ret) \ | 91 | if (!ret) \ |
43 | return 0; | 92 | return 0; |
44 | 93 | ||
45 | #undef TRACE_FIELD_ZERO_CHAR | 94 | #undef __array_desc |
46 | #define TRACE_FIELD_ZERO_CHAR(item) \ | 95 | #define __array_desc(type, container, item, len) \ |
47 | ret = trace_seq_printf(s, "\tfield:char " #item ";\t" \ | 96 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
48 | "offset:%u;\tsize:0;\n", \ | 97 | "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ |
49 | (unsigned int)offsetof(typeof(field), item)); \ | 98 | offsetof(typeof(field), container.item), \ |
99 | sizeof(field.container.item), \ | ||
100 | is_signed_type(type)); \ | ||
50 | if (!ret) \ | 101 | if (!ret) \ |
51 | return 0; | 102 | return 0; |
52 | 103 | ||
53 | #undef TRACE_FIELD_SIGN | 104 | #undef __dynamic_array |
54 | #define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ | 105 | #define __dynamic_array(type, item) \ |
55 | TRACE_FIELD(type, item, assign) | 106 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ |
107 | "offset:%zu;\tsize:0;\tsigned:%u;\n", \ | ||
108 | offsetof(typeof(field), item), \ | ||
109 | is_signed_type(type)); \ | ||
110 | if (!ret) \ | ||
111 | return 0; | ||
56 | 112 | ||
57 | #undef TP_RAW_FMT | 113 | #undef F_printk |
58 | #define TP_RAW_FMT(args...) args | 114 | #define F_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) |
59 | 115 | ||
60 | #undef TRACE_EVENT_FORMAT | 116 | #undef __entry |
61 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | 117 | #define __entry REC |
62 | static int \ | ||
63 | ftrace_format_##call(struct ftrace_event_call *unused, \ | ||
64 | struct trace_seq *s) \ | ||
65 | { \ | ||
66 | struct args field; \ | ||
67 | int ret; \ | ||
68 | \ | ||
69 | tstruct; \ | ||
70 | \ | ||
71 | trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \ | ||
72 | \ | ||
73 | return ret; \ | ||
74 | } | ||
75 | 118 | ||
76 | #undef TRACE_EVENT_FORMAT_NOFILTER | 119 | #undef FTRACE_ENTRY |
77 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ | 120 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
78 | tpfmt) \ | ||
79 | static int \ | 121 | static int \ |
80 | ftrace_format_##call(struct ftrace_event_call *unused, \ | 122 | ftrace_format_##name(struct ftrace_event_call *unused, \ |
81 | struct trace_seq *s) \ | 123 | struct trace_seq *s) \ |
82 | { \ | 124 | { \ |
83 | struct args field; \ | 125 | struct struct_name field __attribute__((unused)); \ |
84 | int ret; \ | 126 | int ret = 0; \ |
85 | \ | 127 | \ |
86 | tstruct; \ | 128 | tstruct; \ |
87 | \ | 129 | \ |
88 | trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \ | 130 | trace_seq_printf(s, "\nprint fmt: " print); \ |
89 | \ | 131 | \ |
90 | return ret; \ | 132 | return ret; \ |
91 | } | 133 | } |
92 | 134 | ||
93 | #include "trace_event_types.h" | 135 | #include "trace_entries.h" |
94 | |||
95 | #undef TRACE_ZERO_CHAR | ||
96 | #define TRACE_ZERO_CHAR(arg) | ||
97 | |||
98 | #undef TRACE_FIELD | ||
99 | #define TRACE_FIELD(type, item, assign)\ | ||
100 | entry->item = assign; | ||
101 | |||
102 | #undef TRACE_FIELD | ||
103 | #define TRACE_FIELD(type, item, assign)\ | ||
104 | entry->item = assign; | ||
105 | |||
106 | #undef TRACE_FIELD_SIGN | ||
107 | #define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ | ||
108 | TRACE_FIELD(type, item, assign) | ||
109 | |||
110 | #undef TP_CMD | ||
111 | #define TP_CMD(cmd...) cmd | ||
112 | |||
113 | #undef TRACE_ENTRY | ||
114 | #define TRACE_ENTRY entry | ||
115 | |||
116 | #undef TRACE_FIELD_SPECIAL | ||
117 | #define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \ | ||
118 | cmd; | ||
119 | |||
120 | #undef TRACE_EVENT_FORMAT | ||
121 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | ||
122 | int ftrace_define_fields_##call(struct ftrace_event_call *event_call); \ | ||
123 | static int ftrace_raw_init_event_##call(void); \ | ||
124 | \ | ||
125 | struct ftrace_event_call __used \ | ||
126 | __attribute__((__aligned__(4))) \ | ||
127 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
128 | .name = #call, \ | ||
129 | .id = proto, \ | ||
130 | .system = __stringify(TRACE_SYSTEM), \ | ||
131 | .raw_init = ftrace_raw_init_event_##call, \ | ||
132 | .show_format = ftrace_format_##call, \ | ||
133 | .define_fields = ftrace_define_fields_##call, \ | ||
134 | }; \ | ||
135 | static int ftrace_raw_init_event_##call(void) \ | ||
136 | { \ | ||
137 | INIT_LIST_HEAD(&event_##call.fields); \ | ||
138 | return 0; \ | ||
139 | } \ | ||
140 | |||
141 | #undef TRACE_EVENT_FORMAT_NOFILTER | ||
142 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ | ||
143 | tpfmt) \ | ||
144 | \ | ||
145 | struct ftrace_event_call __used \ | ||
146 | __attribute__((__aligned__(4))) \ | ||
147 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
148 | .name = #call, \ | ||
149 | .id = proto, \ | ||
150 | .system = __stringify(TRACE_SYSTEM), \ | ||
151 | .show_format = ftrace_format_##call, \ | ||
152 | }; | ||
153 | 136 | ||
154 | #include "trace_event_types.h" | ||
155 | 137 | ||
156 | #undef TRACE_FIELD | 138 | #undef __field |
157 | #define TRACE_FIELD(type, item, assign) \ | 139 | #define __field(type, item) \ |
158 | ret = trace_define_field(event_call, #type, #item, \ | 140 | ret = trace_define_field(event_call, #type, #item, \ |
159 | offsetof(typeof(field), item), \ | 141 | offsetof(typeof(field), item), \ |
160 | sizeof(field.item), \ | 142 | sizeof(field.item), \ |
@@ -162,32 +144,45 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
162 | if (ret) \ | 144 | if (ret) \ |
163 | return ret; | 145 | return ret; |
164 | 146 | ||
165 | #undef TRACE_FIELD_SPECIAL | 147 | #undef __field_desc |
166 | #define TRACE_FIELD_SPECIAL(type, item, len, cmd) \ | 148 | #define __field_desc(type, container, item) \ |
149 | ret = trace_define_field(event_call, #type, #item, \ | ||
150 | offsetof(typeof(field), \ | ||
151 | container.item), \ | ||
152 | sizeof(field.container.item), \ | ||
153 | is_signed_type(type), FILTER_OTHER); \ | ||
154 | if (ret) \ | ||
155 | return ret; | ||
156 | |||
157 | #undef __array | ||
158 | #define __array(type, item, len) \ | ||
159 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | ||
167 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 160 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
168 | offsetof(typeof(field), item), \ | 161 | offsetof(typeof(field), item), \ |
169 | sizeof(field.item), 0, FILTER_OTHER); \ | 162 | sizeof(field.item), 0, FILTER_OTHER); \ |
170 | if (ret) \ | 163 | if (ret) \ |
171 | return ret; | 164 | return ret; |
172 | 165 | ||
173 | #undef TRACE_FIELD_SIGN | 166 | #undef __array_desc |
174 | #define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ | 167 | #define __array_desc(type, container, item, len) \ |
175 | ret = trace_define_field(event_call, #type, #item, \ | 168 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
176 | offsetof(typeof(field), item), \ | 169 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
177 | sizeof(field.item), is_signed, \ | 170 | offsetof(typeof(field), \ |
171 | container.item), \ | ||
172 | sizeof(field.container.item), 0, \ | ||
178 | FILTER_OTHER); \ | 173 | FILTER_OTHER); \ |
179 | if (ret) \ | 174 | if (ret) \ |
180 | return ret; | 175 | return ret; |
181 | 176 | ||
182 | #undef TRACE_FIELD_ZERO_CHAR | 177 | #undef __dynamic_array |
183 | #define TRACE_FIELD_ZERO_CHAR(item) | 178 | #define __dynamic_array(type, item) |
184 | 179 | ||
185 | #undef TRACE_EVENT_FORMAT | 180 | #undef FTRACE_ENTRY |
186 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | 181 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
187 | int \ | 182 | int \ |
188 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | 183 | ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ |
189 | { \ | 184 | { \ |
190 | struct args field; \ | 185 | struct struct_name field; \ |
191 | int ret; \ | 186 | int ret; \ |
192 | \ | 187 | \ |
193 | ret = trace_define_common_fields(event_call); \ | 188 | ret = trace_define_common_fields(event_call); \ |
@@ -199,8 +194,42 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | |||
199 | return ret; \ | 194 | return ret; \ |
200 | } | 195 | } |
201 | 196 | ||
202 | #undef TRACE_EVENT_FORMAT_NOFILTER | 197 | #include "trace_entries.h" |
203 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ | 198 | |
204 | tpfmt) | 199 | |
200 | #undef __field | ||
201 | #define __field(type, item) | ||
202 | |||
203 | #undef __field_desc | ||
204 | #define __field_desc(type, container, item) | ||
205 | |||
206 | #undef __array | ||
207 | #define __array(type, item, len) | ||
208 | |||
209 | #undef __array_desc | ||
210 | #define __array_desc(type, container, item, len) | ||
211 | |||
212 | #undef __dynamic_array | ||
213 | #define __dynamic_array(type, item) | ||
214 | |||
215 | #undef FTRACE_ENTRY | ||
216 | #define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \ | ||
217 | static int ftrace_raw_init_event_##call(void); \ | ||
218 | \ | ||
219 | struct ftrace_event_call __used \ | ||
220 | __attribute__((__aligned__(4))) \ | ||
221 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
222 | .name = #call, \ | ||
223 | .id = type, \ | ||
224 | .system = __stringify(TRACE_SYSTEM), \ | ||
225 | .raw_init = ftrace_raw_init_event_##call, \ | ||
226 | .show_format = ftrace_format_##call, \ | ||
227 | .define_fields = ftrace_define_fields_##call, \ | ||
228 | }; \ | ||
229 | static int ftrace_raw_init_event_##call(void) \ | ||
230 | { \ | ||
231 | INIT_LIST_HEAD(&event_##call.fields); \ | ||
232 | return 0; \ | ||
233 | } \ | ||
205 | 234 | ||
206 | #include "trace_event_types.h" | 235 | #include "trace_entries.h" |
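trace_export.c now drives everything from trace_entries.h: the header is included several times while __field(), __array() and FTRACE_ENTRY() are redefined between passes, so a single field list yields struct definitions, the format-printing callbacks and the field-registration functions. A self-contained sketch of that multi-pass X-macro expansion; MY_ENTRY_FIELDS is a hypothetical stand-in for the contents of trace_entries.h:

#include <stdio.h>
#include <stddef.h>

#define MY_ENTRY_FIELDS(F)          \
        F(int,  pid)                \
        F(long, timestamp)

/* Pass 1: expand the list into struct members. */
#define DECLARE_FIELD(type, item) type item;
struct my_entry { MY_ENTRY_FIELDS(DECLARE_FIELD) };
#undef DECLARE_FIELD

/* Pass 2: expand the same list into layout output, as the
 * ftrace_format_* callbacks above do. */
#define PRINT_FIELD(type, item)                                        \
        printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", #type,      \
               #item, offsetof(struct my_entry, item),                 \
               sizeof(((struct my_entry *)0)->item));

int main(void)
{
        MY_ENTRY_FIELDS(PRINT_FIELD)
        return 0;
}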
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 5b01b94518fc..b3f3776b0cd6 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -290,7 +290,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, | |||
290 | { | 290 | { |
291 | long count = (long)data; | 291 | long count = (long)data; |
292 | 292 | ||
293 | seq_printf(m, "%pf:", (void *)ip); | 293 | seq_printf(m, "%ps:", (void *)ip); |
294 | 294 | ||
295 | if (ops == &traceon_probe_ops) | 295 | if (ops == &traceon_probe_ops) |
296 | seq_printf(m, "traceon"); | 296 | seq_printf(m, "traceon"); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index b3749a2c3132..45e6c01b2e4d 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -124,7 +124,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | |||
124 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { | 124 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { |
125 | ftrace_graph_stop(); | 125 | ftrace_graph_stop(); |
126 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" | 126 | WARN(1, "Bad frame pointer: expected %lx, received %lx\n" |
127 | " from func %pF return to %lx\n", | 127 | " from func %ps return to %lx\n", |
128 | current->ret_stack[index].fp, | 128 | current->ret_stack[index].fp, |
129 | frame_pointer, | 129 | frame_pointer, |
130 | (void *)current->ret_stack[index].func, | 130 | (void *)current->ret_stack[index].func, |
@@ -364,6 +364,15 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
364 | } | 364 | } |
365 | 365 | ||
366 | 366 | ||
367 | static enum print_line_t | ||
368 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | ||
369 | { | ||
370 | if (!trace_seq_putc(s, ' ')) | ||
371 | return 0; | ||
372 | |||
373 | return trace_print_lat_fmt(s, entry); | ||
374 | } | ||
375 | |||
367 | /* If the pid changed since the last trace, output this event */ | 376 | /* If the pid changed since the last trace, output this event */ |
368 | static enum print_line_t | 377 | static enum print_line_t |
369 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | 378 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
@@ -521,6 +530,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
521 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 530 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
522 | return TRACE_TYPE_PARTIAL_LINE; | 531 | return TRACE_TYPE_PARTIAL_LINE; |
523 | } | 532 | } |
533 | |||
524 | /* Proc */ | 534 | /* Proc */ |
525 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | 535 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { |
526 | ret = print_graph_proc(s, pid); | 536 | ret = print_graph_proc(s, pid); |
@@ -659,7 +669,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
659 | return TRACE_TYPE_PARTIAL_LINE; | 669 | return TRACE_TYPE_PARTIAL_LINE; |
660 | } | 670 | } |
661 | 671 | ||
662 | ret = trace_seq_printf(s, "%pf();\n", (void *)call->func); | 672 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); |
663 | if (!ret) | 673 | if (!ret) |
664 | return TRACE_TYPE_PARTIAL_LINE; | 674 | return TRACE_TYPE_PARTIAL_LINE; |
665 | 675 | ||
@@ -702,7 +712,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
702 | return TRACE_TYPE_PARTIAL_LINE; | 712 | return TRACE_TYPE_PARTIAL_LINE; |
703 | } | 713 | } |
704 | 714 | ||
705 | ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func); | 715 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
706 | if (!ret) | 716 | if (!ret) |
707 | return TRACE_TYPE_PARTIAL_LINE; | 717 | return TRACE_TYPE_PARTIAL_LINE; |
708 | 718 | ||
@@ -758,6 +768,13 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
758 | return TRACE_TYPE_PARTIAL_LINE; | 768 | return TRACE_TYPE_PARTIAL_LINE; |
759 | } | 769 | } |
760 | 770 | ||
771 | /* Latency format */ | ||
772 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | ||
773 | ret = print_graph_lat_fmt(s, ent); | ||
774 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
775 | return TRACE_TYPE_PARTIAL_LINE; | ||
776 | } | ||
777 | |||
761 | return 0; | 778 | return 0; |
762 | } | 779 | } |
763 | 780 | ||
@@ -952,28 +969,59 @@ print_graph_function(struct trace_iterator *iter) | |||
952 | return TRACE_TYPE_HANDLED; | 969 | return TRACE_TYPE_HANDLED; |
953 | } | 970 | } |
954 | 971 | ||
972 | static void print_lat_header(struct seq_file *s) | ||
973 | { | ||
974 | static const char spaces[] = "                " /* 16 spaces */ | ||
975 | "    " /* 4 spaces */ | ||
976 | "                 "; /* 17 spaces */ | ||
977 | int size = 0; | ||
978 | |||
979 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | ||
980 | size += 16; | ||
981 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | ||
982 | size += 4; | ||
983 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | ||
984 | size += 17; | ||
985 | |||
986 | seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); | ||
987 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | ||
988 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | ||
989 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | ||
990 | seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces); | ||
991 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | ||
992 | } | ||
993 | |||
955 | static void print_graph_headers(struct seq_file *s) | 994 | static void print_graph_headers(struct seq_file *s) |
956 | { | 995 | { |
996 | int lat = trace_flags & TRACE_ITER_LATENCY_FMT; | ||
997 | |||
998 | if (lat) | ||
999 | print_lat_header(s); | ||
1000 | |||
957 | /* 1st line */ | 1001 | /* 1st line */ |
958 | seq_printf(s, "# "); | 1002 | seq_printf(s, "#"); |
959 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1003 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
960 | seq_printf(s, " TIME "); | 1004 | seq_printf(s, " TIME "); |
961 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1005 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
962 | seq_printf(s, "CPU"); | 1006 | seq_printf(s, " CPU"); |
963 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1007 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
964 | seq_printf(s, " TASK/PID "); | 1008 | seq_printf(s, " TASK/PID "); |
1009 | if (lat) | ||
1010 | seq_printf(s, "|||||"); | ||
965 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1011 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
966 | seq_printf(s, " DURATION "); | 1012 | seq_printf(s, " DURATION "); |
967 | seq_printf(s, " FUNCTION CALLS\n"); | 1013 | seq_printf(s, " FUNCTION CALLS\n"); |
968 | 1014 | ||
969 | /* 2nd line */ | 1015 | /* 2nd line */ |
970 | seq_printf(s, "# "); | 1016 | seq_printf(s, "#"); |
971 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | 1017 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) |
972 | seq_printf(s, " | "); | 1018 | seq_printf(s, " | "); |
973 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 1019 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
974 | seq_printf(s, "| "); | 1020 | seq_printf(s, " | "); |
975 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 1021 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
976 | seq_printf(s, " | | "); | 1022 | seq_printf(s, " | | "); |
1023 | if (lat) | ||
1024 | seq_printf(s, "|||||"); | ||
977 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | 1025 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
978 | seq_printf(s, " | | "); | 1026 | seq_printf(s, " | | "); |
979 | seq_printf(s, " | | | |\n"); | 1027 | seq_printf(s, " | | | |\n"); |
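print_lat_header() above sizes its indentation with printf's "%.*s" precision: it emits at most size characters of a constant spaces buffer, where size is the combined width of whichever optional columns are enabled. The same trick in a userspace sketch, with the column widths copied from the hunk:

#include <stdio.h>

int main(void)
{
        static const char spaces[] = "                                     ";
        int abs_time = 1, cpu = 1, proc = 0;
        int size = 0;

        if (abs_time)
                size += 16;     /* TIME column */
        if (cpu)
                size += 4;      /* CPU column */
        if (proc)
                size += 17;     /* TASK/PID column */

        printf("#%.*s _-----=> irqs-off\n", size, spaces);
        printf("#%.*s / _----=> need-resched\n", size, spaces);
        return 0;
}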
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index ca7d7c4d0c2a..69543a905cd5 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
155 | seq_print_ip_sym(seq, it->from, symflags) && | 155 | seq_print_ip_sym(seq, it->from, symflags) && |
156 | trace_seq_printf(seq, "\n")) | 156 | trace_seq_printf(seq, "\n")) |
157 | return TRACE_TYPE_HANDLED; | 157 | return TRACE_TYPE_HANDLED; |
158 | return TRACE_TYPE_PARTIAL_LINE;; | 158 | return TRACE_TYPE_PARTIAL_LINE; |
159 | } | 159 | } |
160 | return TRACE_TYPE_UNHANDLED; | 160 | return TRACE_TYPE_UNHANDLED; |
161 | } | 161 | } |
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to) | |||
165 | struct ftrace_event_call *call = &event_hw_branch; | 165 | struct ftrace_event_call *call = &event_hw_branch; |
166 | struct trace_array *tr = hw_branch_trace; | 166 | struct trace_array *tr = hw_branch_trace; |
167 | struct ring_buffer_event *event; | 167 | struct ring_buffer_event *event; |
168 | struct ring_buffer *buf; | ||
168 | struct hw_branch_entry *entry; | 169 | struct hw_branch_entry *entry; |
169 | unsigned long irq1; | 170 | unsigned long irq1; |
170 | int cpu; | 171 | int cpu; |
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to) | |||
180 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | 181 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
181 | goto out; | 182 | goto out; |
182 | 183 | ||
183 | event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, | 184 | buf = tr->buffer; |
185 | event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES, | ||
184 | sizeof(*entry), 0, 0); | 186 | sizeof(*entry), 0, 0); |
185 | if (!event) | 187 | if (!event) |
186 | goto out; | 188 | goto out; |
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to) | |||
189 | entry->ent.type = TRACE_HW_BRANCHES; | 191 | entry->ent.type = TRACE_HW_BRANCHES; |
190 | entry->from = from; | 192 | entry->from = from; |
191 | entry->to = to; | 193 | entry->to = to; |
192 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 194 | if (!filter_check_discard(call, entry, buf, event)) |
193 | trace_buffer_unlock_commit(tr, event, 0, 0); | 195 | trace_buffer_unlock_commit(buf, event, 0, 0); |
194 | 196 | ||
195 | out: | 197 | out: |
196 | atomic_dec(&tr->data[cpu]->disabled); | 198 | atomic_dec(&tr->data[cpu]->disabled); |
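The hw-branch change reads tr->buffer once into a local buf so that reserve, filter-discard and commit all act on the same ring buffer, even if tr->buffer is swapped (for instance by the max-latency snapshot) in between. A toy illustration of why the single read matters; the "concurrent" swap is simulated inline:

#include <stdio.h>

struct buffer { const char *name; };

static struct buffer a = { "A" }, b = { "B" };
static struct buffer *current_buf = &a;

static void record(void)
{
        struct buffer *buf = current_buf;       /* single read */

        printf("reserve on %s\n", buf->name);
        current_buf = &b;                       /* swap happens elsewhere */
        printf("commit on %s\n", buf->name);    /* still A: correctly paired */
}

int main(void)
{
        record();
        return 0;
}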
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 5555b75a0d12..3aa7eaa2114c 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -129,15 +129,10 @@ check_critical_timing(struct trace_array *tr, | |||
129 | unsigned long parent_ip, | 129 | unsigned long parent_ip, |
130 | int cpu) | 130 | int cpu) |
131 | { | 131 | { |
132 | unsigned long latency, t0, t1; | ||
133 | cycle_t T0, T1, delta; | 132 | cycle_t T0, T1, delta; |
134 | unsigned long flags; | 133 | unsigned long flags; |
135 | int pc; | 134 | int pc; |
136 | 135 | ||
137 | /* | ||
138 | * usecs conversion is slow so we try to delay the conversion | ||
139 | * as long as possible: | ||
140 | */ | ||
141 | T0 = data->preempt_timestamp; | 136 | T0 = data->preempt_timestamp; |
142 | T1 = ftrace_now(cpu); | 137 | T1 = ftrace_now(cpu); |
143 | delta = T1-T0; | 138 | delta = T1-T0; |
@@ -157,18 +152,15 @@ check_critical_timing(struct trace_array *tr, | |||
157 | 152 | ||
158 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
159 | 154 | ||
160 | latency = nsecs_to_usecs(delta); | ||
161 | |||
162 | if (data->critical_sequence != max_sequence) | 155 | if (data->critical_sequence != max_sequence) |
163 | goto out_unlock; | 156 | goto out_unlock; |
164 | 157 | ||
165 | tracing_max_latency = delta; | ||
166 | t0 = nsecs_to_usecs(T0); | ||
167 | t1 = nsecs_to_usecs(T1); | ||
168 | |||
169 | data->critical_end = parent_ip; | 158 | data->critical_end = parent_ip; |
170 | 159 | ||
171 | update_max_tr_single(tr, current, cpu); | 160 | if (likely(!is_tracing_stopped())) { |
161 | tracing_max_latency = delta; | ||
162 | update_max_tr_single(tr, current, cpu); | ||
163 | } | ||
172 | 164 | ||
173 | max_sequence++; | 165 | max_sequence++; |
174 | 166 | ||
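Both here and in the wakeup tracer further below, the max-latency bookkeeping is now skipped while tracing is stopped, since a delta measured across a stop/start window would be meaningless. A minimal sketch of the guard, with is_tracing_stopped() modeled as a plain flag and the report_latency() comparison folded in:

#include <stdbool.h>
#include <stdio.h>

static unsigned long long tracing_max_latency;
static bool tracing_stopped;

static void maybe_update_max(unsigned long long delta)
{
        if (tracing_stopped)
                return;                 /* snapshot would be stale */
        if (delta > tracing_max_latency)
                tracing_max_latency = delta;
}

int main(void)
{
        maybe_update_max(100);
        tracing_stopped = true;
        maybe_update_max(500);          /* ignored */
        printf("%llu\n", tracing_max_latency);  /* 100 */
        return 0;
}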
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index c4c9bbda53d3..0acd834659ed 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -307,6 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
307 | struct trace_array_cpu *data, | 307 | struct trace_array_cpu *data, |
308 | struct mmiotrace_rw *rw) | 308 | struct mmiotrace_rw *rw) |
309 | { | 309 | { |
310 | struct ftrace_event_call *call = &event_mmiotrace_rw; | ||
310 | struct ring_buffer *buffer = tr->buffer; | 311 | struct ring_buffer *buffer = tr->buffer; |
311 | struct ring_buffer_event *event; | 312 | struct ring_buffer_event *event; |
312 | struct trace_mmiotrace_rw *entry; | 313 | struct trace_mmiotrace_rw *entry; |
@@ -320,7 +321,9 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
320 | } | 321 | } |
321 | entry = ring_buffer_event_data(event); | 322 | entry = ring_buffer_event_data(event); |
322 | entry->rw = *rw; | 323 | entry->rw = *rw; |
323 | trace_buffer_unlock_commit(buffer, event, 0, pc); | 324 | |
325 | if (!filter_check_discard(call, entry, buffer, event)) | ||
326 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
324 | } | 327 | } |
325 | 328 | ||
326 | void mmio_trace_rw(struct mmiotrace_rw *rw) | 329 | void mmio_trace_rw(struct mmiotrace_rw *rw) |
@@ -334,6 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
334 | struct trace_array_cpu *data, | 337 | struct trace_array_cpu *data, |
335 | struct mmiotrace_map *map) | 338 | struct mmiotrace_map *map) |
336 | { | 339 | { |
340 | struct ftrace_event_call *call = &event_mmiotrace_map; | ||
337 | struct ring_buffer *buffer = tr->buffer; | 341 | struct ring_buffer *buffer = tr->buffer; |
338 | struct ring_buffer_event *event; | 342 | struct ring_buffer_event *event; |
339 | struct trace_mmiotrace_map *entry; | 343 | struct trace_mmiotrace_map *entry; |
@@ -347,7 +351,9 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
347 | } | 351 | } |
348 | entry = ring_buffer_event_data(event); | 352 | entry = ring_buffer_event_data(event); |
349 | entry->map = *map; | 353 | entry->map = *map; |
350 | trace_buffer_unlock_commit(buffer, event, 0, pc); | 354 | |
355 | if (!filter_check_discard(call, entry, buffer, event)) | ||
356 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
351 | } | 357 | } |
352 | 358 | ||
353 | void mmio_trace_mapping(struct mmiotrace_map *map) | 359 | void mmio_trace_mapping(struct mmiotrace_map *map) |
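The two mmiotrace commit sites now follow the pattern used elsewhere in this series: fill the reserved entry, then give the event filter a chance to veto it before committing. The shape in isolation, with a hypothetical predicate standing in for filter_check_discard():

#include <stdbool.h>
#include <stdio.h>

struct entry { int value; };

static bool filter_discard(const struct entry *e)
{
        return e->value < 0;    /* stand-in for the real filter */
}

static void commit(const struct entry *e)
{
        printf("committed %d\n", e->value);
}

static void trace_one(int value)
{
        struct entry e = { value };

        if (!filter_discard(&e))
                commit(&e);
}

int main(void)
{
        trace_one(42);
        trace_one(-1);          /* silently discarded */
        return 0;
}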
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index e0c2545622e8..ed17565826b0 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -407,7 +407,7 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
407 | * since individual threads might have already quit! | 407 | * since individual threads might have already quit! |
408 | */ | 408 | */ |
409 | rcu_read_lock(); | 409 | rcu_read_lock(); |
410 | task = find_task_by_vpid(entry->ent.tgid); | 410 | task = find_task_by_vpid(entry->tgid); |
411 | if (task) | 411 | if (task) |
412 | mm = get_task_mm(task); | 412 | mm = get_task_mm(task); |
413 | rcu_read_unlock(); | 413 | rcu_read_unlock(); |
@@ -460,18 +460,23 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
460 | return ret; | 460 | return ret; |
461 | } | 461 | } |
462 | 462 | ||
463 | static int | 463 | /** |
464 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | 464 | * trace_print_lat_fmt - print the irq, preempt and lockdep fields |
465 | * @s: trace seq struct to write to | ||
466 | * @entry: The trace entry field from the ring buffer | ||
467 | * | ||
468 | * Prints the generic fields of irqs off, in hard or softirq, preempt | ||
469 | * count and lock depth. | ||
470 | */ | ||
471 | int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | ||
465 | { | 472 | { |
466 | int hardirq, softirq; | 473 | int hardirq, softirq; |
467 | char comm[TASK_COMM_LEN]; | 474 | int ret; |
468 | 475 | ||
469 | trace_find_cmdline(entry->pid, comm); | ||
470 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | 476 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
471 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | 477 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; |
472 | 478 | ||
473 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c", | 479 | if (!trace_seq_printf(s, "%c%c%c", |
474 | comm, entry->pid, cpu, | ||
475 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | 480 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : |
476 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? | 481 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? |
477 | 'X' : '.', | 482 | 'X' : '.', |
@@ -482,8 +487,31 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
482 | return 0; | 487 | return 0; |
483 | 488 | ||
484 | if (entry->preempt_count) | 489 | if (entry->preempt_count) |
485 | return trace_seq_printf(s, "%x", entry->preempt_count); | 490 | ret = trace_seq_printf(s, "%x", entry->preempt_count); |
486 | return trace_seq_puts(s, "."); | 491 | else |
492 | ret = trace_seq_putc(s, '.'); | ||
493 | |||
494 | if (!ret) | ||
495 | return 0; | ||
496 | |||
497 | if (entry->lock_depth < 0) | ||
498 | return trace_seq_putc(s, '.'); | ||
499 | |||
500 | return trace_seq_printf(s, "%d", entry->lock_depth); | ||
501 | } | ||
502 | |||
503 | static int | ||
504 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
505 | { | ||
506 | char comm[TASK_COMM_LEN]; | ||
507 | |||
508 | trace_find_cmdline(entry->pid, comm); | ||
509 | |||
510 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d", | ||
511 | comm, entry->pid, cpu)) | ||
512 | return 0; | ||
513 | |||
514 | return trace_print_lat_fmt(s, entry); | ||
487 | } | 515 | } |
488 | 516 | ||
489 | static unsigned long preempt_mark_thresh = 100; | 517 | static unsigned long preempt_mark_thresh = 100; |
@@ -857,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | |||
857 | trace_assign_type(field, iter->ent); | 885 | trace_assign_type(field, iter->ent); |
858 | 886 | ||
859 | if (!S) | 887 | if (!S) |
860 | task_state_char(field->prev_state); | 888 | S = task_state_char(field->prev_state); |
861 | T = task_state_char(field->next_state); | 889 | T = task_state_char(field->next_state); |
862 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | 890 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", |
863 | field->prev_pid, | 891 | field->prev_pid, |
@@ -892,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | |||
892 | trace_assign_type(field, iter->ent); | 920 | trace_assign_type(field, iter->ent); |
893 | 921 | ||
894 | if (!S) | 922 | if (!S) |
895 | task_state_char(field->prev_state); | 923 | S = task_state_char(field->prev_state); |
896 | T = task_state_char(field->next_state); | 924 | T = task_state_char(field->next_state); |
897 | 925 | ||
898 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | 926 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
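lat_print_generic() is split so that the flags column can be reused by the graph tracer's new latency header. A simplified userspace rendering of the column trace_print_lat_fmt() emits; the real code also handles nested hard/softirq and the IRQS_NOSUPPORT case:

#include <stdio.h>

#define FLAG_IRQS_OFF 0x01
#define FLAG_HARDIRQ  0x02
#define FLAG_SOFTIRQ  0x04
#define FLAG_RESCHED  0x08

static void print_lat_fmt(unsigned flags, int preempt_count, int lock_depth)
{
        putchar((flags & FLAG_IRQS_OFF) ? 'd' : '.');
        putchar((flags & FLAG_RESCHED) ? 'N' : '.');
        putchar((flags & FLAG_HARDIRQ) ? 'h' :
                ((flags & FLAG_SOFTIRQ) ? 's' : '.'));
        if (preempt_count)
                printf("%x", preempt_count);
        else
                putchar('.');
        if (lock_depth < 0)
                putchar('.');           /* lockdep disabled or unused */
        else
                printf("%d", lock_depth);
        putchar('\n');
}

int main(void)
{
        print_lat_fmt(FLAG_IRQS_OFF | FLAG_HARDIRQ, 1, -1);     /* "d.h1." */
        return 0;
}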
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index d38bec4a9c30..9d91c72ba38b 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h | |||
@@ -26,6 +26,8 @@ extern struct trace_event *ftrace_find_event(int type); | |||
26 | 26 | ||
27 | extern enum print_line_t trace_nop_print(struct trace_iterator *iter, | 27 | extern enum print_line_t trace_nop_print(struct trace_iterator *iter, |
28 | int flags); | 28 | int flags); |
29 | extern int | ||
30 | trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); | ||
29 | 31 | ||
30 | /* used by module unregistering */ | 32 | /* used by module unregistering */ |
31 | extern int __unregister_ftrace_event(struct trace_event *event); | 33 | extern int __unregister_ftrace_event(struct trace_event *event); |
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c deleted file mode 100644 index fe1a00f1445a..000000000000 --- a/kernel/trace/trace_power.c +++ /dev/null | |||
@@ -1,218 +0,0 @@ | |||
1 | /* | ||
2 | * ring buffer based C-state tracer | ||
3 | * | ||
4 | * Arjan van de Ven <arjan@linux.intel.com> | ||
5 | * Copyright (C) 2008 Intel Corporation | ||
6 | * | ||
7 | * Much is borrowed from trace_boot.c which is | ||
8 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <trace/power.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include "trace.h" | ||
19 | #include "trace_output.h" | ||
20 | |||
21 | static struct trace_array *power_trace; | ||
22 | static int __read_mostly trace_power_enabled; | ||
23 | |||
24 | static void probe_power_start(struct power_trace *it, unsigned int type, | ||
25 | unsigned int level) | ||
26 | { | ||
27 | if (!trace_power_enabled) | ||
28 | return; | ||
29 | |||
30 | memset(it, 0, sizeof(struct power_trace)); | ||
31 | it->state = level; | ||
32 | it->type = type; | ||
33 | it->stamp = ktime_get(); | ||
34 | } | ||
35 | |||
36 | |||
37 | static void probe_power_end(struct power_trace *it) | ||
38 | { | ||
39 | struct ftrace_event_call *call = &event_power; | ||
40 | struct ring_buffer_event *event; | ||
41 | struct ring_buffer *buffer; | ||
42 | struct trace_power *entry; | ||
43 | struct trace_array_cpu *data; | ||
44 | struct trace_array *tr = power_trace; | ||
45 | |||
46 | if (!trace_power_enabled) | ||
47 | return; | ||
48 | |||
49 | buffer = tr->buffer; | ||
50 | |||
51 | preempt_disable(); | ||
52 | it->end = ktime_get(); | ||
53 | data = tr->data[smp_processor_id()]; | ||
54 | |||
55 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, | ||
56 | sizeof(*entry), 0, 0); | ||
57 | if (!event) | ||
58 | goto out; | ||
59 | entry = ring_buffer_event_data(event); | ||
60 | entry->state_data = *it; | ||
61 | if (!filter_check_discard(call, entry, buffer, event)) | ||
62 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
63 | out: | ||
64 | preempt_enable(); | ||
65 | } | ||
66 | |||
67 | static void probe_power_mark(struct power_trace *it, unsigned int type, | ||
68 | unsigned int level) | ||
69 | { | ||
70 | struct ftrace_event_call *call = &event_power; | ||
71 | struct ring_buffer_event *event; | ||
72 | struct ring_buffer *buffer; | ||
73 | struct trace_power *entry; | ||
74 | struct trace_array_cpu *data; | ||
75 | struct trace_array *tr = power_trace; | ||
76 | |||
77 | if (!trace_power_enabled) | ||
78 | return; | ||
79 | |||
80 | buffer = tr->buffer; | ||
81 | |||
82 | memset(it, 0, sizeof(struct power_trace)); | ||
83 | it->state = level; | ||
84 | it->type = type; | ||
85 | it->stamp = ktime_get(); | ||
86 | preempt_disable(); | ||
87 | it->end = it->stamp; | ||
88 | data = tr->data[smp_processor_id()]; | ||
89 | |||
90 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, | ||
91 | sizeof(*entry), 0, 0); | ||
92 | if (!event) | ||
93 | goto out; | ||
94 | entry = ring_buffer_event_data(event); | ||
95 | entry->state_data = *it; | ||
96 | if (!filter_check_discard(call, entry, buffer, event)) | ||
97 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
98 | out: | ||
99 | preempt_enable(); | ||
100 | } | ||
101 | |||
102 | static int tracing_power_register(void) | ||
103 | { | ||
104 | int ret; | ||
105 | |||
106 | ret = register_trace_power_start(probe_power_start); | ||
107 | if (ret) { | ||
108 | pr_info("power trace: Couldn't activate tracepoint" | ||
109 | " probe to trace_power_start\n"); | ||
110 | return ret; | ||
111 | } | ||
112 | ret = register_trace_power_end(probe_power_end); | ||
113 | if (ret) { | ||
114 | pr_info("power trace: Couldn't activate tracepoint" | ||
115 | " probe to trace_power_end\n"); | ||
116 | goto fail_start; | ||
117 | } | ||
118 | ret = register_trace_power_mark(probe_power_mark); | ||
119 | if (ret) { | ||
120 | pr_info("power trace: Couldn't activate tracepoint" | ||
121 | " probe to trace_power_mark\n"); | ||
122 | goto fail_end; | ||
123 | } | ||
124 | return ret; | ||
125 | fail_end: | ||
126 | unregister_trace_power_end(probe_power_end); | ||
127 | fail_start: | ||
128 | unregister_trace_power_start(probe_power_start); | ||
129 | return ret; | ||
130 | } | ||
131 | |||
132 | static void start_power_trace(struct trace_array *tr) | ||
133 | { | ||
134 | trace_power_enabled = 1; | ||
135 | } | ||
136 | |||
137 | static void stop_power_trace(struct trace_array *tr) | ||
138 | { | ||
139 | trace_power_enabled = 0; | ||
140 | } | ||
141 | |||
142 | static void power_trace_reset(struct trace_array *tr) | ||
143 | { | ||
144 | trace_power_enabled = 0; | ||
145 | unregister_trace_power_start(probe_power_start); | ||
146 | unregister_trace_power_end(probe_power_end); | ||
147 | unregister_trace_power_mark(probe_power_mark); | ||
148 | } | ||
149 | |||
150 | |||
151 | static int power_trace_init(struct trace_array *tr) | ||
152 | { | ||
153 | power_trace = tr; | ||
154 | |||
155 | trace_power_enabled = 1; | ||
156 | tracing_power_register(); | ||
157 | |||
158 | tracing_reset_online_cpus(tr); | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static enum print_line_t power_print_line(struct trace_iterator *iter) | ||
163 | { | ||
164 | int ret = 0; | ||
165 | struct trace_entry *entry = iter->ent; | ||
166 | struct trace_power *field ; | ||
167 | struct power_trace *it; | ||
168 | struct trace_seq *s = &iter->seq; | ||
169 | struct timespec stamp; | ||
170 | struct timespec duration; | ||
171 | |||
172 | trace_assign_type(field, entry); | ||
173 | it = &field->state_data; | ||
174 | stamp = ktime_to_timespec(it->stamp); | ||
175 | duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); | ||
176 | |||
177 | if (entry->type == TRACE_POWER) { | ||
178 | if (it->type == POWER_CSTATE) | ||
179 | ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", | ||
180 | stamp.tv_sec, | ||
181 | stamp.tv_nsec, | ||
182 | it->state, iter->cpu, | ||
183 | duration.tv_sec, | ||
184 | duration.tv_nsec); | ||
185 | if (it->type == POWER_PSTATE) | ||
186 | ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", | ||
187 | stamp.tv_sec, | ||
188 | stamp.tv_nsec, | ||
189 | it->state, iter->cpu); | ||
190 | if (!ret) | ||
191 | return TRACE_TYPE_PARTIAL_LINE; | ||
192 | return TRACE_TYPE_HANDLED; | ||
193 | } | ||
194 | return TRACE_TYPE_UNHANDLED; | ||
195 | } | ||
196 | |||
197 | static void power_print_header(struct seq_file *s) | ||
198 | { | ||
199 | seq_puts(s, "# TIMESTAMP STATE EVENT\n"); | ||
200 | seq_puts(s, "# | | |\n"); | ||
201 | } | ||
202 | |||
203 | static struct tracer power_tracer __read_mostly = | ||
204 | { | ||
205 | .name = "power", | ||
206 | .init = power_trace_init, | ||
207 | .start = start_power_trace, | ||
208 | .stop = stop_power_trace, | ||
209 | .reset = power_trace_reset, | ||
210 | .print_line = power_print_line, | ||
211 | .print_header = power_print_header, | ||
212 | }; | ||
213 | |||
214 | static int init_power_trace(void) | ||
215 | { | ||
216 | return register_tracer(&power_tracer); | ||
217 | } | ||
218 | device_initcall(init_power_trace); | ||
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 687699d365ae..2547d8813cf0 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/marker.h> | ||
15 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
16 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
17 | #include <linux/list.h> | 16 | #include <linux/list.h> |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index ad69f105a7c6..26185d727676 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -24,6 +24,7 @@ static int __read_mostly tracer_enabled; | |||
24 | 24 | ||
25 | static struct task_struct *wakeup_task; | 25 | static struct task_struct *wakeup_task; |
26 | static int wakeup_cpu; | 26 | static int wakeup_cpu; |
27 | static int wakeup_current_cpu; | ||
27 | static unsigned wakeup_prio = -1; | 28 | static unsigned wakeup_prio = -1; |
28 | static int wakeup_rt; | 29 | static int wakeup_rt; |
29 | 30 | ||
@@ -56,33 +57,23 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
56 | resched = ftrace_preempt_disable(); | 57 | resched = ftrace_preempt_disable(); |
57 | 58 | ||
58 | cpu = raw_smp_processor_id(); | 59 | cpu = raw_smp_processor_id(); |
60 | if (cpu != wakeup_current_cpu) | ||
61 | goto out_enable; | ||
62 | |||
59 | data = tr->data[cpu]; | 63 | data = tr->data[cpu]; |
60 | disabled = atomic_inc_return(&data->disabled); | 64 | disabled = atomic_inc_return(&data->disabled); |
61 | if (unlikely(disabled != 1)) | 65 | if (unlikely(disabled != 1)) |
62 | goto out; | 66 | goto out; |
63 | 67 | ||
64 | local_irq_save(flags); | 68 | local_irq_save(flags); |
65 | __raw_spin_lock(&wakeup_lock); | ||
66 | |||
67 | if (unlikely(!wakeup_task)) | ||
68 | goto unlock; | ||
69 | |||
70 | /* | ||
71 | * The task can't disappear because it needs to | ||
72 | * wake up first, and we have the wakeup_lock. | ||
73 | */ | ||
74 | if (task_cpu(wakeup_task) != cpu) | ||
75 | goto unlock; | ||
76 | 69 | ||
77 | trace_function(tr, ip, parent_ip, flags, pc); | 70 | trace_function(tr, ip, parent_ip, flags, pc); |
78 | 71 | ||
79 | unlock: | ||
80 | __raw_spin_unlock(&wakeup_lock); | ||
81 | local_irq_restore(flags); | 72 | local_irq_restore(flags); |
82 | 73 | ||
83 | out: | 74 | out: |
84 | atomic_dec(&data->disabled); | 75 | atomic_dec(&data->disabled); |
85 | 76 | out_enable: | |
86 | ftrace_preempt_enable(resched); | 77 | ftrace_preempt_enable(resched); |
87 | } | 78 | } |
88 | 79 | ||
@@ -107,11 +98,18 @@ static int report_latency(cycle_t delta) | |||
107 | return 1; | 98 | return 1; |
108 | } | 99 | } |
109 | 100 | ||
101 | static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) | ||
102 | { | ||
103 | if (task != wakeup_task) | ||
104 | return; | ||
105 | |||
106 | wakeup_current_cpu = cpu; | ||
107 | } | ||
108 | |||
110 | static void notrace | 109 | static void notrace |
111 | probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | 110 | probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, |
112 | struct task_struct *next) | 111 | struct task_struct *next) |
113 | { | 112 | { |
114 | unsigned long latency = 0, t0 = 0, t1 = 0; | ||
115 | struct trace_array_cpu *data; | 113 | struct trace_array_cpu *data; |
116 | cycle_t T0, T1, delta; | 114 | cycle_t T0, T1, delta; |
117 | unsigned long flags; | 115 | unsigned long flags; |
@@ -157,10 +155,6 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
157 | trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); | 155 | trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); |
158 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); | 156 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); |
159 | 157 | ||
160 | /* | ||
161 | * usecs conversion is slow so we try to delay the conversion | ||
162 | * as long as possible: | ||
163 | */ | ||
164 | T0 = data->preempt_timestamp; | 158 | T0 = data->preempt_timestamp; |
165 | T1 = ftrace_now(cpu); | 159 | T1 = ftrace_now(cpu); |
166 | delta = T1-T0; | 160 | delta = T1-T0; |
@@ -168,13 +162,10 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
168 | if (!report_latency(delta)) | 162 | if (!report_latency(delta)) |
169 | goto out_unlock; | 163 | goto out_unlock; |
170 | 164 | ||
171 | latency = nsecs_to_usecs(delta); | 165 | if (likely(!is_tracing_stopped())) { |
172 | 166 | tracing_max_latency = delta; | |
173 | tracing_max_latency = delta; | 167 | update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); |
174 | t0 = nsecs_to_usecs(T0); | 168 | } |
175 | t1 = nsecs_to_usecs(T1); | ||
176 | |||
177 | update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); | ||
178 | 169 | ||
179 | out_unlock: | 170 | out_unlock: |
180 | __wakeup_reset(wakeup_trace); | 171 | __wakeup_reset(wakeup_trace); |
@@ -244,6 +235,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
244 | __wakeup_reset(wakeup_trace); | 235 | __wakeup_reset(wakeup_trace); |
245 | 236 | ||
246 | wakeup_cpu = task_cpu(p); | 237 | wakeup_cpu = task_cpu(p); |
238 | wakeup_current_cpu = wakeup_cpu; | ||
247 | wakeup_prio = p->prio; | 239 | wakeup_prio = p->prio; |
248 | 240 | ||
249 | wakeup_task = p; | 241 | wakeup_task = p; |
@@ -293,6 +285,13 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
293 | goto fail_deprobe_wake_new; | 285 | goto fail_deprobe_wake_new; |
294 | } | 286 | } |
295 | 287 | ||
288 | ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task); | ||
289 | if (ret) { | ||
290 | pr_info("wakeup trace: Couldn't activate tracepoint" | ||
291 | " probe to kernel_sched_migrate_task\n"); | ||
292 | return; | ||
293 | } | ||
294 | |||
296 | wakeup_reset(tr); | 295 | wakeup_reset(tr); |
297 | 296 | ||
298 | /* | 297 | /* |
@@ -325,6 +324,7 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
325 | unregister_trace_sched_switch(probe_wakeup_sched_switch); | 324 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
326 | unregister_trace_sched_wakeup_new(probe_wakeup); | 325 | unregister_trace_sched_wakeup_new(probe_wakeup); |
327 | unregister_trace_sched_wakeup(probe_wakeup); | 326 | unregister_trace_sched_wakeup(probe_wakeup); |
327 | unregister_trace_sched_migrate_task(probe_wakeup_migrate_task); | ||
328 | } | 328 | } |
329 | 329 | ||
330 | static int __wakeup_tracer_init(struct trace_array *tr) | 330 | static int __wakeup_tracer_init(struct trace_array *tr) |
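The function probe used to take wakeup_lock on every call just to check task_cpu(wakeup_task). With the new sched_migrate_task probe keeping wakeup_current_cpu current, the hot path shrinks to one integer compare. A hypothetical sketch of that division of labor:

#include <stdio.h>

static int wakeup_current_cpu;

/* Slow path: the migrate probe keeps the cached CPU fresh. */
static void probe_migrate(int task_is_wakeup_target, int cpu)
{
        if (task_is_wakeup_target)
                wakeup_current_cpu = cpu;
}

/* Hot path: one compare instead of taking wakeup_lock. */
static int should_trace(int this_cpu)
{
        return this_cpu == wakeup_current_cpu;
}

int main(void)
{
        probe_migrate(1, 2);
        printf("%d %d\n", should_trace(2), should_trace(3));   /* 1 0 */
        return 0;
}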
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 0f6facb050a1..8504ac71e4e8 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = { | |||
296 | 296 | ||
297 | int | 297 | int |
298 | stack_trace_sysctl(struct ctl_table *table, int write, | 298 | stack_trace_sysctl(struct ctl_table *table, int write, |
299 | struct file *file, void __user *buffer, size_t *lenp, | 299 | void __user *buffer, size_t *lenp, |
300 | loff_t *ppos) | 300 | loff_t *ppos) |
301 | { | 301 | { |
302 | int ret; | 302 | int ret; |
303 | 303 | ||
304 | mutex_lock(&stack_sysctl_mutex); | 304 | mutex_lock(&stack_sysctl_mutex); |
305 | 305 | ||
306 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | 306 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
307 | 307 | ||
308 | if (ret || !write || | 308 | if (ret || !write || |
309 | (last_stack_tracer_enabled == !!stack_tracer_enabled)) | 309 | (last_stack_tracer_enabled == !!stack_tracer_enabled)) |
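Visible in the context above, stack_trace_sysctl() acts only when the written value actually changes the cached last_stack_tracer_enabled, so redundant writes are cheap no-ops. The compare-against-last shape in isolation:

#include <stdio.h>

static int enabled, last_enabled;

static void sysctl_write(int val)
{
        enabled = val;                  /* the proc_dointvec() part */
        if (last_enabled == !!enabled)
                return;                 /* no state change: nothing to do */
        last_enabled = !!enabled;
        printf("%s stack tracer\n", enabled ? "start" : "stop");
}

int main(void)
{
        sysctl_write(1);        /* start */
        sysctl_write(1);        /* no-op */
        sysctl_write(0);        /* stop */
        return 0;
}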
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8712ce3c6a0e..d00d1a8f1f26 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <trace/events/syscalls.h> | 2 | #include <trace/events/syscalls.h> |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/ftrace.h> | 4 | #include <linux/ftrace.h> |
5 | #include <linux/perf_counter.h> | 5 | #include <linux/perf_event.h> |
6 | #include <asm/syscall.h> | 6 | #include <asm/syscall.h> |
7 | 7 | ||
8 | #include "trace_output.h" | 8 | #include "trace_output.h" |
@@ -14,6 +14,69 @@ static int sys_refcount_exit; | |||
14 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); | 14 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); |
15 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); | 15 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); |
16 | 16 | ||
17 | extern unsigned long __start_syscalls_metadata[]; | ||
18 | extern unsigned long __stop_syscalls_metadata[]; | ||
19 | |||
20 | static struct syscall_metadata **syscalls_metadata; | ||
21 | |||
22 | static struct syscall_metadata *find_syscall_meta(unsigned long syscall) | ||
23 | { | ||
24 | struct syscall_metadata *start; | ||
25 | struct syscall_metadata *stop; | ||
26 | char str[KSYM_SYMBOL_LEN]; | ||
27 | |||
28 | |||
29 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
30 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
31 | kallsyms_lookup(syscall, NULL, NULL, NULL, str); | ||
32 | |||
33 | for ( ; start < stop; start++) { | ||
34 | /* | ||
35 | * Only compare after the "sys" prefix. Archs that use | ||
36 | * syscall wrappers may have syscall symbol aliases prefixed | ||
37 | * with "SyS" instead of "sys", leading to an unwanted | ||
38 | * mismatch. | ||
39 | */ | ||
40 | if (start->name && !strcmp(start->name + 3, str + 3)) | ||
41 | return start; | ||
42 | } | ||
43 | return NULL; | ||
44 | } | ||
45 | |||
46 | static struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
47 | { | ||
48 | if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) | ||
49 | return NULL; | ||
50 | |||
51 | return syscalls_metadata[nr]; | ||
52 | } | ||
53 | |||
54 | int syscall_name_to_nr(char *name) | ||
55 | { | ||
56 | int i; | ||
57 | |||
58 | if (!syscalls_metadata) | ||
59 | return -1; | ||
60 | |||
61 | for (i = 0; i < NR_syscalls; i++) { | ||
62 | if (syscalls_metadata[i]) { | ||
63 | if (!strcmp(syscalls_metadata[i]->name, name)) | ||
64 | return i; | ||
65 | } | ||
66 | } | ||
67 | return -1; | ||
68 | } | ||
69 | |||
70 | void set_syscall_enter_id(int num, int id) | ||
71 | { | ||
72 | syscalls_metadata[num]->enter_id = id; | ||
73 | } | ||
74 | |||
75 | void set_syscall_exit_id(int num, int id) | ||
76 | { | ||
77 | syscalls_metadata[num]->exit_id = id; | ||
78 | } | ||
79 | |||
17 | enum print_line_t | 80 | enum print_line_t |
18 | print_syscall_enter(struct trace_iterator *iter, int flags) | 81 | print_syscall_enter(struct trace_iterator *iter, int flags) |
19 | { | 82 | { |
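The metadata code added above (and populated by init_ftrace_syscalls() later in this file) builds an NR_syscalls-sized table mapping syscall number to metadata located by symbol name in a linker section. A userspace sketch of the lookup structure; all_meta and NR_SYSCALLS are hypothetical stand-ins for the __syscalls_metadata section and the arch constant:

#include <stdio.h>
#include <string.h>

struct syscall_metadata { const char *name; int nb_args; };

static struct syscall_metadata all_meta[] = {
        { "sys_read",  3 },
        { "sys_write", 3 },
};

#define NR_SYSCALLS 4
static struct syscall_metadata *table[NR_SYSCALLS];

static int name_to_nr(const char *name)
{
        for (int i = 0; i < NR_SYSCALLS; i++)
                if (table[i] && !strcmp(table[i]->name, name))
                        return i;
        return -1;
}

int main(void)
{
        table[0] = &all_meta[0];        /* as find_syscall_meta() would */
        table[1] = &all_meta[1];
        printf("%d\n", name_to_nr("sys_write"));        /* 1 */
        return 0;
}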
@@ -103,7 +166,8 @@ extern char *__bad_type_size(void); | |||
103 | #define SYSCALL_FIELD(type, name) \ | 166 | #define SYSCALL_FIELD(type, name) \ |
104 | sizeof(type) != sizeof(trace.name) ? \ | 167 | sizeof(type) != sizeof(trace.name) ? \ |
105 | __bad_type_size() : \ | 168 | __bad_type_size() : \ |
106 | #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) | 169 | #type, #name, offsetof(typeof(trace), name), \ |
170 | sizeof(trace.name), is_signed_type(type) | ||
107 | 171 | ||
108 | int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) | 172 | int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) |
109 | { | 173 | { |
@@ -120,7 +184,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
120 | if (!entry) | 184 | if (!entry) |
121 | return 0; | 185 | return 0; |
122 | 186 | ||
123 | ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | 187 | ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" |
188 | "\tsigned:%u;\n", | ||
124 | SYSCALL_FIELD(int, nr)); | 189 | SYSCALL_FIELD(int, nr)); |
125 | if (!ret) | 190 | if (!ret) |
126 | return 0; | 191 | return 0; |
@@ -130,8 +195,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
130 | entry->args[i]); | 195 | entry->args[i]); |
131 | if (!ret) | 196 | if (!ret) |
132 | return 0; | 197 | return 0; |
133 | ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, | 198 | ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;" |
134 | sizeof(unsigned long)); | 199 | "\tsigned:%u;\n", offset, |
200 | sizeof(unsigned long), | ||
201 | is_signed_type(unsigned long)); | ||
135 | if (!ret) | 202 | if (!ret) |
136 | return 0; | 203 | return 0; |
137 | offset += sizeof(unsigned long); | 204 | offset += sizeof(unsigned long); |
@@ -163,10 +230,12 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
163 | struct syscall_trace_exit trace; | 230 | struct syscall_trace_exit trace; |
164 | 231 | ||
165 | ret = trace_seq_printf(s, | 232 | ret = trace_seq_printf(s, |
166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 233 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" |
167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | 234 | "\tsigned:%u;\n" |
235 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" | ||
236 | "\tsigned:%u;\n", | ||
168 | SYSCALL_FIELD(int, nr), | 237 | SYSCALL_FIELD(int, nr), |
169 | SYSCALL_FIELD(unsigned long, ret)); | 238 | SYSCALL_FIELD(long, ret)); |
170 | if (!ret) | 239 | if (!ret) |
171 | return 0; | 240 | return 0; |
172 | 241 | ||
@@ -212,7 +281,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
212 | if (ret) | 281 | if (ret) |
213 | return ret; | 282 | return ret; |
214 | 283 | ||
215 | ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, | 284 | ret = trace_define_field(call, SYSCALL_FIELD(long, ret), |
216 | FILTER_OTHER); | 285 | FILTER_OTHER); |
217 | 286 | ||
218 | return ret; | 287 | return ret; |
@@ -375,6 +444,29 @@ struct trace_event event_syscall_exit = { | |||
375 | .trace = print_syscall_exit, | 444 | .trace = print_syscall_exit, |
376 | }; | 445 | }; |
377 | 446 | ||
447 | int __init init_ftrace_syscalls(void) | ||
448 | { | ||
449 | struct syscall_metadata *meta; | ||
450 | unsigned long addr; | ||
451 | int i; | ||
452 | |||
453 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | ||
454 | NR_syscalls, GFP_KERNEL); | ||
455 | if (!syscalls_metadata) { | ||
456 | WARN_ON(1); | ||
457 | return -ENOMEM; | ||
458 | } | ||
459 | |||
460 | for (i = 0; i < NR_syscalls; i++) { | ||
461 | addr = arch_syscall_addr(i); | ||
462 | meta = find_syscall_meta(addr); | ||
463 | syscalls_metadata[i] = meta; | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | } | ||
468 | core_initcall(init_ftrace_syscalls); | ||
469 | |||
378 | #ifdef CONFIG_EVENT_PROFILE | 470 | #ifdef CONFIG_EVENT_PROFILE |
379 | 471 | ||
380 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); | 472 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); |
@@ -384,10 +476,13 @@ static int sys_prof_refcount_exit; | |||
384 | 476 | ||
385 | static void prof_syscall_enter(struct pt_regs *regs, long id) | 477 | static void prof_syscall_enter(struct pt_regs *regs, long id) |
386 | { | 478 | { |
387 | struct syscall_trace_enter *rec; | ||
388 | struct syscall_metadata *sys_data; | 479 | struct syscall_metadata *sys_data; |
480 | struct syscall_trace_enter *rec; | ||
481 | unsigned long flags; | ||
482 | char *raw_data; | ||
389 | int syscall_nr; | 483 | int syscall_nr; |
390 | int size; | 484 | int size; |
485 | int cpu; | ||
391 | 486 | ||
392 | syscall_nr = syscall_get_nr(current, regs); | 487 | syscall_nr = syscall_get_nr(current, regs); |
393 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) | 488 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) |
@@ -402,20 +497,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
402 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 497 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
403 | size -= sizeof(u32); | 498 | size -= sizeof(u32); |
404 | 499 | ||
405 | do { | 500 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, |
406 | char raw_data[size]; | 501 | "profile buffer not large enough")) |
502 | return; | ||
503 | |||
504 | /* Protect the per-cpu buffer; begin the RCU read side */ | ||
505 | local_irq_save(flags); | ||
407 | 506 | ||
408 | /* zero the dead bytes from align to not leak stack to user */ | 507 | cpu = smp_processor_id(); |
409 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
410 | 508 | ||
411 | rec = (struct syscall_trace_enter *) raw_data; | 509 | if (in_nmi()) |
412 | tracing_generic_entry_update(&rec->ent, 0, 0); | 510 | raw_data = rcu_dereference(trace_profile_buf_nmi); |
413 | rec->ent.type = sys_data->enter_id; | 511 | else |
414 | rec->nr = syscall_nr; | 512 | raw_data = rcu_dereference(trace_profile_buf); |
415 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 513 | |
416 | (unsigned long *)&rec->args); | 514 | if (!raw_data) |
417 | perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); | 515 | goto end; |
418 | } while(0); | 516 | |
517 | raw_data = per_cpu_ptr(raw_data, cpu); | ||
518 | |||
519 | /* zero the dead bytes from align to not leak stack to user */ | ||
520 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
521 | |||
522 | rec = (struct syscall_trace_enter *) raw_data; | ||
523 | tracing_generic_entry_update(&rec->ent, 0, 0); | ||
524 | rec->ent.type = sys_data->enter_id; | ||
525 | rec->nr = syscall_nr; | ||
526 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | ||
527 | (unsigned long *)&rec->args); | ||
528 | perf_tp_event(sys_data->enter_id, 0, 1, rec, size); | ||
529 | |||
530 | end: | ||
531 | local_irq_restore(flags); | ||
419 | } | 532 | } |
420 | 533 | ||
421 | int reg_prof_syscall_enter(char *name) | 534 | int reg_prof_syscall_enter(char *name) |
@@ -460,8 +573,12 @@ void unreg_prof_syscall_enter(char *name) | |||
460 | static void prof_syscall_exit(struct pt_regs *regs, long ret) | 573 | static void prof_syscall_exit(struct pt_regs *regs, long ret) |
461 | { | 574 | { |
462 | struct syscall_metadata *sys_data; | 575 | struct syscall_metadata *sys_data; |
463 | struct syscall_trace_exit rec; | 576 | struct syscall_trace_exit *rec; |
577 | unsigned long flags; | ||
464 | int syscall_nr; | 578 | int syscall_nr; |
579 | char *raw_data; | ||
580 | int size; | ||
581 | int cpu; | ||
465 | 582 | ||
466 | syscall_nr = syscall_get_nr(current, regs); | 583 | syscall_nr = syscall_get_nr(current, regs); |
467 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) | 584 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) |
@@ -471,12 +588,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
471 | if (!sys_data) | 588 | if (!sys_data) |
472 | return; | 589 | return; |
473 | 590 | ||
474 | tracing_generic_entry_update(&rec.ent, 0, 0); | 591 | /* We can probably do that at build time */ |
475 | rec.ent.type = sys_data->exit_id; | 592 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); |
476 | rec.nr = syscall_nr; | 593 | size -= sizeof(u32); |
477 | rec.ret = syscall_get_return_value(current, regs); | 594 | |
595 | /* | ||
596 | * Impossible at present, but be paranoid about the future. | ||
597 | * Can this check be moved out of the runtime path? | ||
598 | */ | ||
599 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | ||
600 | "exit event has grown above profile buffer size")) | ||
601 | return; | ||
602 | |||
603 | /* Protect the per-cpu buffer; begin the RCU read side */ | ||
604 | local_irq_save(flags); | ||
605 | cpu = smp_processor_id(); | ||
606 | |||
607 | if (in_nmi()) | ||
608 | raw_data = rcu_dereference(trace_profile_buf_nmi); | ||
609 | else | ||
610 | raw_data = rcu_dereference(trace_profile_buf); | ||
611 | |||
612 | if (!raw_data) | ||
613 | goto end; | ||
614 | |||
615 | raw_data = per_cpu_ptr(raw_data, cpu); | ||
616 | |||
617 | /* zero the dead bytes from align to not leak stack to user */ | ||
618 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
619 | |||
620 | rec = (struct syscall_trace_exit *)raw_data; | ||
478 | 621 | ||
479 | perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec)); | 622 | tracing_generic_entry_update(&rec->ent, 0, 0); |
623 | rec->ent.type = sys_data->exit_id; | ||
624 | rec->nr = syscall_nr; | ||
625 | rec->ret = syscall_get_return_value(current, regs); | ||
626 | |||
627 | perf_tp_event(sys_data->exit_id, 0, 1, rec, size); | ||
628 | |||
629 | end: | ||
630 | local_irq_restore(flags); | ||
480 | } | 631 | } |
481 | 632 | ||
482 | int reg_prof_syscall_exit(char *name) | 633 | int reg_prof_syscall_exit(char *name) |
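The prof_syscall_enter/exit rework above replaces the on-stack VLA with a preallocated per-CPU scratch buffer (one for normal context, one for NMI), guarded by an up-front size check and local_irq_save(). A single-threaded userspace sketch of the buffer discipline; MAX_PROFILE_SIZE, scratch and emit() are hypothetical stand-ins:

#include <stdio.h>
#include <string.h>

#define MAX_PROFILE_SIZE 256
static char scratch[MAX_PROFILE_SIZE];  /* stand-in for the per-cpu buffer */

static int emit(const void *payload, size_t len)
{
        /* Same sizing as the kernel code: align len + sizeof(u32) up to
         * a u64 boundary, then drop the u32 again. */
        size_t size = ((len + sizeof(unsigned) + 7) & ~(size_t)7)
                      - sizeof(unsigned);

        if (size > MAX_PROFILE_SIZE)
                return -1;                      /* WARN_ONCE() in the kernel */

        memcpy(scratch, payload, len);
        memset(scratch + len, 0, size - len);   /* zero the alignment slack */
        printf("emitted %zu bytes\n", size);
        return 0;
}

int main(void)
{
        long nr = 42;
        return emit(&nr, sizeof(nr));
}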