about | summary | refs | log | tree | commit | diff | stats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-07 04:30:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-07 04:30:50 -0400
commit7f60ba388f5b9dd8b0da463b394412dace3ab814 (patch)
treeb97b4fb5c8ad07a435e5b1b559988364764d5e8d /kernel
parente665faa424a4a782aa986274920c1fc5b76f5560 (diff)
parent80c9d03c22f13a17df67b4b99a83ed5e9acf6093 (diff)
Merge tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore
Pull pstore changes from Anton Vorontsov:

 1) We no longer ad-hoc to the function tracer "high level"
    infrastructure and no longer use its debugfs knobs. The change
    slightly touches kernel/trace directory, but it got the needed ack
    from Steven Rostedt:

      http://lkml.org/lkml/2012/8/21/688

 2) Added maintainers entry;

 3) A bunch of fixes, nothing special.

* tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore:
  pstore: Avoid recursive spinlocks in the oops_in_progress case
  pstore/ftrace: Convert to its own enable/disable debugfs knob
  pstore/ram: Add missing platform_device_unregister
  MAINTAINERS: Add pstore maintainers
  pstore/ram: Mark ramoops_pstore_write_buf() as notrace
  pstore/ram: Fix printk format warning
  pstore/ram: Fix possible NULL dereference
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/trace_functions.c15
1 file changed, 1 insertion, 14 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 483162a9f908..507a7a9630bf 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/pstore.h>
17#include <linux/fs.h> 16#include <linux/fs.h>
18 17
19#include "trace.h" 18#include "trace.h"
@@ -76,10 +75,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
76 preempt_enable_notrace(); 75 preempt_enable_notrace();
77} 76}
78 77
79/* Our two options */ 78/* Our option */
80enum { 79enum {
81 TRACE_FUNC_OPT_STACK = 0x1, 80 TRACE_FUNC_OPT_STACK = 0x1,
82 TRACE_FUNC_OPT_PSTORE = 0x2,
83}; 81};
84 82
85static struct tracer_flags func_flags; 83static struct tracer_flags func_flags;
@@ -109,12 +107,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
109 disabled = atomic_inc_return(&data->disabled); 107 disabled = atomic_inc_return(&data->disabled);
110 108
111 if (likely(disabled == 1)) { 109 if (likely(disabled == 1)) {
112 /*
113 * So far tracing doesn't support multiple buffers, so
114 * we make an explicit call for now.
115 */
116 if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
117 pstore_ftrace_call(ip, parent_ip);
118 pc = preempt_count(); 110 pc = preempt_count();
119 trace_function(tr, ip, parent_ip, flags, pc); 111 trace_function(tr, ip, parent_ip, flags, pc);
120 } 112 }
@@ -181,9 +173,6 @@ static struct tracer_opt func_opts[] = {
181#ifdef CONFIG_STACKTRACE 173#ifdef CONFIG_STACKTRACE
182 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, 174 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
183#endif 175#endif
184#ifdef CONFIG_PSTORE_FTRACE
185 { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
186#endif
187 { } /* Always set a last empty entry */ 176 { } /* Always set a last empty entry */
188}; 177};
189 178
@@ -236,8 +225,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
236 } 225 }
237 226
238 break; 227 break;
239 case TRACE_FUNC_OPT_PSTORE:
240 break;
241 default: 228 default:
242 return -EINVAL; 229 return -EINVAL;
243 } 230 }