author     Paul Mundt <lethal@linux-sh.org>    2009-06-25 01:30:12 -0400
committer  Ingo Molnar <mingo@elte.hu>         2009-06-25 04:31:30 -0400
commit     1155de47cd66d0c496d5a6fb2223e980ef1285b2 (patch)
tree       0ee762d6b1215a37fe89acc174de8cbbf476d218 /kernel/trace
parent     00e54d087afb3867b0b461aef6c1ff433d0df564 (diff)
ring-buffer: Make it generally available
While hunting down the cause of the hwlat_detector ring buffer spew in
my failed -next builds, it became obvious that folks are now treating
ring_buffer as something generic, independent of tracing, and thus
suitable for public driver consumption.
Given that there are only a few minor areas in ring_buffer that have any
reliance on CONFIG_TRACING or CONFIG_FUNCTION_TRACER, provide stubs for
those and make it generally available.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Cc: Jon Masters <jcm@jonmasters.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20090625053012.GB19944@linux-sh.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
 kernel/trace/ring_buffer.c | 11 +++++++++++
 kernel/trace/trace.h       |  7 +++++++
 2 files changed, 18 insertions(+), 0 deletions(-)
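For orientation before the patch itself: the approach the changelog describes is to compile out the tracing-only helpers and replace them with no-op stubs, so the call sites inside ring_buffer.c stay free of #ifdefs. Below is a small, self-contained sketch of that stub pattern; the helper names and the depth limit mirror the patch, but the surrounding program is hypothetical, plain user-space C rather than kernel code.

#include <stdio.h>

#ifdef CONFIG_TRACING

static int recursion_depth;

static int trace_recursive_lock(void)
{
	if (recursion_depth >= 16)	/* mirrors TRACE_RECURSIVE_DEPTH */
		return -1;
	recursion_depth++;
	return 0;
}

static void trace_recursive_unlock(void)
{
	recursion_depth--;
}

#else

/* stubs: the lock always "succeeds", the unlock does nothing */
#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

int main(void)
{
	/* the call site looks identical whether tracing is configured or not */
	if (trace_recursive_lock())
		return 1;
	printf("event reserved\n");
	trace_recursive_unlock();
	return 0;
}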
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac2638258..bf27bb7a63e2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }
 
+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f8..3548ae5cc780 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
 
 extern struct pid *ftrace_pid_trace;
 
+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
 
 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit
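With the tracing-only pieces behind CONFIG_TRACING stubbed out as above, the ring_buffer API becomes usable from ordinary driver code regardless of the tracing configuration, which is the point of the change. The following is a minimal sketch of such a consumer, assuming the ring_buffer API of this era (ring_buffer_alloc(), ring_buffer_lock_reserve(), ring_buffer_event_data(), ring_buffer_unlock_commit(), ring_buffer_free()); the module itself is hypothetical and is not derived from hwlat_detector.

/*
 * Hypothetical consumer module -- not part of this patch.  It exists
 * only to illustrate that the ring_buffer API can now be used from
 * ordinary driver code whether or not CONFIG_TRACING is set.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *rb;

static int __init rb_consumer_init(void)
{
	struct ring_buffer_event *event;
	u64 *sample;

	/* one page of buffer per CPU is plenty for a demonstration */
	rb = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* reserve space for one entry, fill it in, and commit it */
	event = ring_buffer_lock_reserve(rb, sizeof(*sample));
	if (event) {
		sample = ring_buffer_event_data(event);
		*sample = 42;			/* the entry's payload */
		ring_buffer_unlock_commit(rb, event);
	}
	return 0;
}

static void __exit rb_consumer_exit(void)
{
	ring_buffer_free(rb);
}

module_init(rb_consumer_init);
module_exit(rb_consumer_exit);
MODULE_LICENSE("GPL");

However the tracing Kconfig options are set, a consumer like this builds and behaves the same; preserving that property is exactly what the no-op stubs are for.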