author	Ingo Molnar <mingo@elte.hu>	2009-10-01 05:20:33 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-10-01 05:20:48 -0400
commit	0aa73ba1c4e1ad1d51a29e0df95ccd9f746918b6 (patch)
tree	f0714ddcd02812b4fbe3b5405df9e4068f5587e2 /kernel/trace
parent	925936ebf35a95c290e010b784c962164e6728f3 (diff)
parent	33974093c024f08caadd2fc71a83bd811ed1831d (diff)

Merge branch 'tracing/urgent' into tracing/core

Merge reason: Pick up latest fixes and update to latest upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')

-rw-r--r--  kernel/trace/Kconfig               |   2
-rw-r--r--  kernel/trace/Makefile              |   2
-rw-r--r--  kernel/trace/ftrace.c              |  58
-rw-r--r--  kernel/trace/power-traces.c        |  20
-rw-r--r--  kernel/trace/ring_buffer.c         |   2
-rw-r--r--  kernel/trace/trace.c               |  70
-rw-r--r--  kernel/trace/trace.h               |   3
-rw-r--r--  kernel/trace/trace_entries.h       |  17
-rw-r--r--  kernel/trace/trace_event_profile.c |  82
-rw-r--r--  kernel/trace/trace_events.c        |  56
-rw-r--r--  kernel/trace/trace_hw_branches.c   |   2
-rw-r--r--  kernel/trace/trace_power.c         | 218
-rw-r--r--  kernel/trace/trace_printk.c        |   1
-rw-r--r--  kernel/trace/trace_stack.c         |   4
-rw-r--r--  kernel/trace/trace_syscalls.c      |  99

15 files changed, 255 insertions, 381 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e71634604400..b416512ad17f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
 # This allows those options to appear when no other tracer is selected. But the
 # options do not appear when something else selects it. We need the two options
 # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options options.
+# hidding of the automatic options.
 
 config TRACING
 	bool
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 844164dca90a..26f03ac07c2b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
-obj-$(CONFIG_POWER_TRACER) += trace_power.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
@@ -54,5 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_EVENT_TRACING) += power-traces.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ddf23a225b52..9a72853a8f0a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	func = ftrace_trace_function;
+#else
+	func = __ftrace_trace_function;
+#endif
 
 	if (ftrace_pid_trace) {
 		set_ftrace_pid_function(func);
@@ -1520,7 +1524,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -1621,8 +1625,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 		if (!ret) {
 			struct seq_file *m = file->private_data;
 			m->private = iter;
-		} else
+		} else {
+			trace_parser_put(&iter->parser);
 			kfree(iter);
+		}
 	} else
 		file->private_data = iter;
 	mutex_unlock(&ftrace_regex_lock);
@@ -2148,7 +2154,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	struct trace_parser *parser;
 	ssize_t ret, read;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	mutex_lock(&ftrace_regex_lock);
@@ -2162,7 +2168,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	parser = &iter->parser;
 	read = trace_get_user(parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded(parser) &&
+	if (read >= 0 && trace_parser_loaded(parser) &&
 	    !trace_parser_cont(parser)) {
 		ret = ftrace_process_regex(parser->buffer,
 					   parser->idx, enable);
@@ -2360,11 +2366,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-	unsigned long *array = m->private;
-
 	if (*pos >= ftrace_graph_count)
 		return NULL;
-	return &array[*pos];
+	return &ftrace_graph_funcs[*pos];
 }
 
 static void *
@@ -2407,7 +2411,7 @@ static int g_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
 	.start = g_start,
 	.next = g_next,
 	.stop = g_stop,
@@ -2428,16 +2432,10 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
+	mutex_unlock(&graph_lock);
 
-	if (file->f_mode & FMODE_READ) {
+	if (file->f_mode & FMODE_READ)
 		ret = seq_open(file, &ftrace_graph_seq_ops);
-		if (!ret) {
-			struct seq_file *m = file->private_data;
-			m->private = ftrace_graph_funcs;
-		}
-	} else
-		file->private_data = ftrace_graph_funcs;
-	mutex_unlock(&graph_lock);
 
 	return ret;
 }
@@ -2506,9 +2504,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	unsigned long *array;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
 	if (!cnt || cnt < 0)
 		return 0;
@@ -2517,35 +2513,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 
 	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
 		ret = -EBUSY;
-		goto out;
+		goto out_unlock;
 	}
 
-	if (file->f_mode & FMODE_READ) {
-		struct seq_file *m = file->private_data;
-		array = m->private;
-	} else
-		array = file->private_data;
-
 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_unlock;
 	}
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		parser.buffer[parser.idx] = 0;
 
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(array, &ftrace_graph_count,
+		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
 					parser.buffer);
 		if (ret)
-			goto out;
+			goto out_free;
 	}
 
 	ret = read;
- out:
+
+out_free:
 	trace_parser_put(&parser);
+out_unlock:
 	mutex_unlock(&graph_lock);
 
 	return ret;
@@ -2976,7 +2968,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-		     struct file *file, void __user *buffer, size_t *lenp,
+		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos)
 {
 	int ret;
@@ -2986,7 +2978,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	mutex_lock(&ftrace_lock);
 
-	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
 		goto out;
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
new file mode 100644
index 000000000000..e06c6e3d56a3
--- /dev/null
+++ b/kernel/trace/power-traces.c
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Arjan van de Ven <arjan@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
+
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6eef38923b07..d4ff01970547 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-#include "trace.h"
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT 4U
 #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd172..45068269ebb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer *current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
@@ -416,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 
 	/* read the non-space input */
 	while (cnt && !isspace(ch)) {
-		if (parser->idx < parser->size)
+		if (parser->idx < parser->size - 1)
 			parser->buffer[parser->idx++] = ch;
 		else {
 			ret = -EINVAL;
@@ -619,7 +618,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -627,6 +625,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
@@ -641,7 +644,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -692,9 +695,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -703,7 +703,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +725,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
@@ -745,17 +744,7 @@ void unregister_tracer(struct tracer *type)
 		current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
@@ -1960,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -1995,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
@@ -2174,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -2604,7 +2591,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+2];
+	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
@@ -2754,15 +2741,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+1];
+	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
 	int err;
 
 	ret = cnt;
 
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
+	if (cnt > MAX_TRACER_SIZE)
+		cnt = MAX_TRACER_SIZE;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
@@ -4400,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4411,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index db6b83edd49b..365fb19d9e11 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,7 +11,6 @@
 #include <linux/ftrace.h>
 #include <trace/boot.h>
 #include <linux/kmemtrace.h>
-#include <trace/power.h>
 
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
@@ -37,7 +36,6 @@ enum trace_type {
 	TRACE_HW_BRANCHES,
 	TRACE_KMEM_ALLOC,
 	TRACE_KMEM_FREE,
-	TRACE_POWER,
 	TRACE_BLK,
 
 	__TRACE_LAST_TYPE,
@@ -207,7 +205,6 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
-		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
 		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
 			  TRACE_KMEM_ALLOC);	\
 		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index a431748ddd6e..ead3d724599d 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -330,23 +330,6 @@ FTRACE_ENTRY(hw_branch, hw_branch_entry,
 	F_printk("from: %llx to: %llx", __entry->from, __entry->to)
 );
 
-FTRACE_ENTRY(power, trace_power,
-
-	TRACE_POWER,
-
-	F_STRUCT(
-		__field_struct(	struct power_trace,	state_data	)
-		__field_desc(	s64,	state_data,	stamp	)
-		__field_desc(	s64,	state_data,	end	)
-		__field_desc(	int,	state_data,	type	)
-		__field_desc(	int,	state_data,	state	)
-	),
-
-	F_printk("%llx->%llx type:%u state:%u",
-		 __entry->stamp, __entry->end,
-		 __entry->type, __entry->state)
-);
-
 FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 
 	TRACE_KMEM_ALLOC,
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d15..dd44b8768867 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,57 @@
 #include <linux/module.h>
 #include "trace.h"
 
+/*
+ * We can't use a size but a type in alloc_percpu()
+ * So let's create a dummy type that matches the desired size
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
+char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+	char *buf;
+	int ret = -ENOMEM;
+
+	if (atomic_inc_return(&event->profile_count))
+		return 0;
+
+	if (!total_profile_count++) {
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf;
+
+		rcu_assign_pointer(trace_profile_buf, buf);
+
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf_nmi;
+
+		rcu_assign_pointer(trace_profile_buf_nmi, buf);
+	}
+
+	ret = event->profile_enable();
+	if (!ret)
+		return 0;
+
+	kfree(trace_profile_buf_nmi);
+fail_buf_nmi:
+	kfree(trace_profile_buf);
+fail_buf:
+	total_profile_count--;
+	atomic_dec(&event->profile_count);
+
+	return ret;
+}
+
 int ftrace_profile_enable(int event_id)
 {
 	struct ftrace_event_call *event;
@@ -17,7 +68,7 @@ int ftrace_profile_enable(int event_id)
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id && event->profile_enable &&
 		    try_module_get(event->mod)) {
-			ret = event->profile_enable(event);
+			ret = ftrace_profile_enable_event(event);
 			break;
 		}
 	}
@@ -26,6 +77,33 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+	char *buf, *nmi_buf;
+
+	if (!atomic_add_negative(-1, &event->profile_count))
+		return;
+
+	event->profile_disable();
+
+	if (!--total_profile_count) {
+		buf = trace_profile_buf;
+		rcu_assign_pointer(trace_profile_buf, NULL);
+
+		nmi_buf = trace_profile_buf_nmi;
+		rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+		/*
+		 * Ensure every events in profiling have finished before
+		 * releasing the buffers
+		 */
+		synchronize_sched();
+
+		free_percpu(buf);
+		free_percpu(nmi_buf);
+	}
+}
+
 void ftrace_profile_disable(int event_id)
 {
 	struct ftrace_event_call *event;
@@ -33,7 +111,7 @@ void ftrace_profile_disable(int event_id)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			event->profile_disable(event);
+			ftrace_profile_disable_event(event);
 			module_put(event->mod);
 			break;
 		}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8c91b7c8f047..5e9ffc33f6db 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		int set = 1;
 
 		if (*parser.buffer == '!')
@@ -271,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, NULL, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
@@ -316,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca7d7c4d0c2a..23b63859130e 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 		    seq_print_ip_sym(seq, it->from, symflags) &&
 		    trace_seq_printf(seq, "\n"))
 			return TRACE_TYPE_HANDLED;
-		return TRACE_TYPE_PARTIAL_LINE;;
+		return TRACE_TYPE_PARTIAL_LINE;
 	}
 	return TRACE_TYPE_UNHANDLED;
 }
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
deleted file mode 100644
index fe1a00f1445a..000000000000
--- a/kernel/trace/trace_power.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * ring buffer based C-state tracer
- *
- * Arjan van de Ven <arjan@linux.intel.com>
- * Copyright (C) 2008 Intel Corporation
- *
- * Much is borrowed from trace_boot.c which is
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <trace/power.h>
-#include <linux/kallsyms.h>
-#include <linux/module.h>
-
-#include "trace.h"
-#include "trace_output.h"
-
-static struct trace_array *power_trace;
-static int __read_mostly trace_power_enabled;
-
-static void probe_power_start(struct power_trace *it, unsigned int type,
-		unsigned int level)
-{
-	if (!trace_power_enabled)
-		return;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-}
-
-
-static void probe_power_end(struct power_trace *it)
-{
-	struct ftrace_event_call *call = &event_power;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	buffer = tr->buffer;
-
-	preempt_disable();
-	it->end = ktime_get();
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
-	preempt_enable();
-}
-
-static void probe_power_mark(struct power_trace *it, unsigned int type,
-		unsigned int level)
-{
-	struct ftrace_event_call *call = &event_power;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
-	struct trace_power *entry;
-	struct trace_array_cpu *data;
-	struct trace_array *tr = power_trace;
-
-	if (!trace_power_enabled)
-		return;
-
-	buffer = tr->buffer;
-
-	memset(it, 0, sizeof(struct power_trace));
-	it->state = level;
-	it->type = type;
-	it->stamp = ktime_get();
-	preempt_disable();
-	it->end = it->stamp;
-	data = tr->data[smp_processor_id()];
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, 0);
- out:
-	preempt_enable();
-}
-
-static int tracing_power_register(void)
-{
-	int ret;
-
-	ret = register_trace_power_start(probe_power_start);
-	if (ret) {
-		pr_info("power trace: Couldn't activate tracepoint"
-			" probe to trace_power_start\n");
-		return ret;
-	}
-	ret = register_trace_power_end(probe_power_end);
-	if (ret) {
-		pr_info("power trace: Couldn't activate tracepoint"
-			" probe to trace_power_end\n");
-		goto fail_start;
-	}
-	ret = register_trace_power_mark(probe_power_mark);
-	if (ret) {
-		pr_info("power trace: Couldn't activate tracepoint"
-			" probe to trace_power_mark\n");
-		goto fail_end;
-	}
-	return ret;
-fail_end:
-	unregister_trace_power_end(probe_power_end);
-fail_start:
-	unregister_trace_power_start(probe_power_start);
-	return ret;
-}
-
-static void start_power_trace(struct trace_array *tr)
-{
-	trace_power_enabled = 1;
-}
-
-static void stop_power_trace(struct trace_array *tr)
-{
-	trace_power_enabled = 0;
-}
-
-static void power_trace_reset(struct trace_array *tr)
-{
-	trace_power_enabled = 0;
-	unregister_trace_power_start(probe_power_start);
-	unregister_trace_power_end(probe_power_end);
-	unregister_trace_power_mark(probe_power_mark);
-}
-
-
-static int power_trace_init(struct trace_array *tr)
-{
-	power_trace = tr;
-
-	trace_power_enabled = 1;
-	tracing_power_register();
-
-	tracing_reset_online_cpus(tr);
-	return 0;
-}
-
-static enum print_line_t power_print_line(struct trace_iterator *iter)
-{
-	int ret = 0;
-	struct trace_entry *entry = iter->ent;
-	struct trace_power *field ;
-	struct power_trace *it;
-	struct trace_seq *s = &iter->seq;
-	struct timespec stamp;
-	struct timespec duration;
-
-	trace_assign_type(field, entry);
-	it = &field->state_data;
-	stamp = ktime_to_timespec(it->stamp);
-	duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
-
-	if (entry->type == TRACE_POWER) {
-		if (it->type == POWER_CSTATE)
-			ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
-					  stamp.tv_sec,
-					  stamp.tv_nsec,
-					  it->state, iter->cpu,
-					  duration.tv_sec,
-					  duration.tv_nsec);
-		if (it->type == POWER_PSTATE)
-			ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
-					  stamp.tv_sec,
-					  stamp.tv_nsec,
-					  it->state, iter->cpu);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-		return TRACE_TYPE_HANDLED;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-static void power_print_header(struct seq_file *s)
-{
-	seq_puts(s, "# TIMESTAMP STATE EVENT\n");
-	seq_puts(s, "# | | |\n");
-}
-
-static struct tracer power_tracer __read_mostly =
-{
-	.name = "power",
-	.init = power_trace_init,
-	.start = start_power_trace,
-	.stop = stop_power_trace,
-	.reset = power_trace_reset,
-	.print_line = power_print_line,
-	.print_header = power_print_header,
-};
-
-static int init_power_trace(void)
-{
-	return register_tracer(&power_tracer);
-}
-device_initcall(init_power_trace);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 687699d365ae..2547d8813cf0 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -11,7 +11,6 @@
 #include <linux/ftrace.h>
 #include <linux/string.h>
 #include <linux/module.h>
-#include <linux/marker.h>
 #include <linux/mutex.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0f6facb050a1..8504ac71e4e8 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = {
 
 int
 stack_trace_sysctl(struct ctl_table *table, int write,
-		   struct file *file, void __user *buffer, size_t *lenp,
+		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos)
 {
 	int ret;
 
 	mutex_lock(&stack_sysctl_mutex);
 
-	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (ret || !write ||
 	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8712ce3c6a0e..9fbce6c9d2e1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
 #include <trace/events/syscalls.h>
 #include <linux/kernel.h>
 #include <linux/ftrace.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/syscall.h>
 
 #include "trace_output.h"
@@ -384,10 +384,13 @@ static int sys_prof_refcount_exit;
 
 static void prof_syscall_enter(struct pt_regs *regs, long id)
 {
-	struct syscall_trace_enter *rec;
 	struct syscall_metadata *sys_data;
+	struct syscall_trace_enter *rec;
+	unsigned long flags;
+	char *raw_data;
 	int syscall_nr;
 	int size;
+	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -402,20 +405,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	do {
-		char raw_data[size];
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		      "profile buffer not large enough"))
+		return;
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(flags);
 
-		/* zero the dead bytes from align to not leak stack to user */
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
 
-		rec = (struct syscall_trace_enter *) raw_data;
-		tracing_generic_entry_update(&rec->ent, 0, 0);
-		rec->ent.type = sys_data->enter_id;
-		rec->nr = syscall_nr;
-		syscall_get_arguments(current, regs, 0, sys_data->nb_args,
-				       (unsigned long *)&rec->args);
-		perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
-	} while(0);
+	raw_data = per_cpu_ptr(raw_data, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	rec = (struct syscall_trace_enter *) raw_data;
+	tracing_generic_entry_update(&rec->ent, 0, 0);
+	rec->ent.type = sys_data->enter_id;
+	rec->nr = syscall_nr;
+	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
+			       (unsigned long *)&rec->args);
+	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
+
+end:
+	local_irq_restore(flags);
 }
 
 int reg_prof_syscall_enter(char *name)
@@ -460,8 +481,12 @@ void unreg_prof_syscall_enter(char *name)
 static void prof_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
-	struct syscall_trace_exit rec;
+	struct syscall_trace_exit *rec;
+	unsigned long flags;
 	int syscall_nr;
+	char *raw_data;
+	int size;
+	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -471,12 +496,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	if (!sys_data)
 		return;
 
-	tracing_generic_entry_update(&rec.ent, 0, 0);
-	rec.ent.type = sys_data->exit_id;
-	rec.nr = syscall_nr;
-	rec.ret = syscall_get_return_value(current, regs);
+	/* We can probably do that at build time */
+	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
+	size -= sizeof(u32);
 
-	perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+	/*
+	 * Impossible, but be paranoid with the future
+	 * How to put this check outside runtime?
+	 */
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		"exit event has grown above profile buffer size"))
+		return;
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	rec = (struct syscall_trace_exit *)raw_data;
+
+	tracing_generic_entry_update(&rec->ent, 0, 0);
+	rec->ent.type = sys_data->exit_id;
+	rec->nr = syscall_nr;
+	rec->ret = syscall_get_return_value(current, regs);
+
+	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
+
+end:
+	local_irq_restore(flags);
 }
 
 int reg_prof_syscall_exit(char *name)