aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2012-08-21 05:36:39 -0400
committerIngo Molnar <mingo@kernel.org>2012-08-21 05:36:49 -0400
commita0e0fac633bed47c15cab744663d8c67f8f3421d (patch)
tree585f685aacda161cb00bb0c8599e4f60b3c262c6
parentbcada3d4b8c96b8792c2306f363992ca5ab9da42 (diff)
parent87abb3b15c62033409f5bf2ffb5620c94f91cf2c (diff)
Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull ftrace fixlets from Steve Rostedt. Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/trace/ring_buffer.c4
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events_filter.c2
-rw-r--r--kernel/trace/trace_selftest.c27
4 files changed, 19 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 49491fa7daa2..b32ed0e385a5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2816,7 +2816,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
  * to the buffer after this will fail and return NULL.
  *
  * This is different than ring_buffer_record_disable() as
- * it works like an on/off switch, where as the disable() verison
+ * it works like an on/off switch, where as the disable() version
  * must be paired with a enable().
  */
 void ring_buffer_record_off(struct ring_buffer *buffer)
@@ -2839,7 +2839,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
  * ring_buffer_record_off().
  *
  * This is different than ring_buffer_record_enable() as
- * it works like an on/off switch, where as the enable() verison
+ * it works like an on/off switch, where as the enable() version
  * must be paired with a disable().
  */
 void ring_buffer_record_on(struct ring_buffer *buffer)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c38c81496ce..08acf42e325b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -426,15 +426,15 @@ __setup("trace_buf_size=", set_buf_size);
 
 static int __init set_tracing_thresh(char *str)
 {
-	unsigned long threshhold;
+	unsigned long threshold;
 	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &threshhold);
+	ret = strict_strtoul(str, 0, &threshold);
 	if (ret < 0)
 		return 0;
-	tracing_thresh = threshhold * 1000;
+	tracing_thresh = threshold * 1000;
 	return 1;
 }
 __setup("tracing_thresh=", set_tracing_thresh);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 431dba8b7542..c154797a7ff7 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -2002,7 +2002,7 @@ static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
 static int __ftrace_function_set_filter(int filter, char *buf, int len,
 					struct function_filter_data *data)
 {
-	int i, re_cnt, ret;
+	int i, re_cnt, ret = -EINVAL;
 	int *reset;
 	char **re;
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 1003a4d5eb25..2c00a691a540 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1041,6 +1041,8 @@ static int trace_wakeup_test_thread(void *data)
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule();
 
+	complete(x);
+
 	/* we are awake, now wait to disappear */
 	while (!kthread_should_stop()) {
 		/*
@@ -1084,24 +1086,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	/* reset the max latency */
 	tracing_max_latency = 0;
 
-	/* sleep to let the RT thread sleep too */
-	msleep(100);
+	while (p->on_rq) {
+		/*
+		 * Sleep to make sure the RT thread is asleep too.
+		 * On virtual machines we can't rely on timings,
+		 * but we want to make sure this test still works.
+		 */
+		msleep(100);
+	}
 
-	/*
-	 * Yes this is slightly racy. It is possible that for some
-	 * strange reason that the RT thread we created, did not
-	 * call schedule for 100ms after doing the completion,
-	 * and we do a wakeup on a task that already is awake.
-	 * But that is extremely unlikely, and the worst thing that
-	 * happens in such a case, is that we disable tracing.
-	 * Honestly, if this race does happen something is horrible
-	 * wrong with the system.
-	 */
+	init_completion(&isrt);
 
 	wake_up_process(p);
 
-	/* give a little time to let the thread wake up */
-	msleep(100);
+	/* Wait for the task to wake up */
+	wait_for_completion(&isrt);
 
 	/* stop the tracing. */
 	tracing_stop();