aboutsummaryrefslogtreecommitdiffstats
path: root/lib/kernel_lock.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-09-28 11:12:49 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2009-09-28 12:00:48 -0400
commit925936ebf35a95c290e010b784c962164e6728f3 (patch)
treee0bae3a2655f84dcf238c9acacd158e5ce422b32 /lib/kernel_lock.c
parent3f6fe06dbf67b46d36fedec502300e04dffeb67a (diff)
tracing: Pushdown the bkl tracepoints calls
Currently we are calling the bkl tracepoint callbacks just before the bkl lock/unlock operations, i.e. the tracepoint call is not inside a lock_kernel() function but inside a lock_kernel() macro. Hence the bkl trace event header must be included from smp_lock.h. This raises some nasty circular header dependencies: linux/smp_lock.h -> trace/events/bkl.h -> trace/define_trace.h -> trace/ftrace.h -> linux/ftrace_event.h -> linux/hardirq.h -> linux/smp_lock.h This results in incomplete event declarations, spurious event definitions and other kinds of funny behaviour. This is hardly fixable without ugly workarounds. So instead, we push the file name, line number and function name as lock_kernel() parameters, so that we only deal with the trace event header from lib/kernel_lock.c This adds two parameters to lock_kernel() and unlock_kernel(), but it should be fine with respect to performance because this pair does not seem to be called in fast paths. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Li Zefan <lizf@cn.fujitsu.com>
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--lib/kernel_lock.c15
1 file changed, 11 insertions, 4 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5c10b2e1fd08..4ebfa5a164d7 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,9 +8,11 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
10#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#define CREATE_TRACE_POINTS
12#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
13 12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
15
14/* 16/*
15 * The 'big kernel lock' 17 * The 'big kernel lock'
16 * 18 *
@@ -114,19 +116,24 @@ static inline void __unlock_kernel(void)
114 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
115 * worry about other CPU's. 117 * worry about other CPU's.
116 */ 118 */
117void __lockfunc _lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
118{ 120{
119 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
122
123 trace_lock_kernel(func, file, line);
124
120 if (likely(!depth)) 125 if (likely(!depth))
121 __lock_kernel(); 126 __lock_kernel();
122 current->lock_depth = depth; 127 current->lock_depth = depth;
123} 128}
124 129
125void __lockfunc _unlock_kernel(void) 130void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
126{ 131{
127 BUG_ON(current->lock_depth < 0); 132 BUG_ON(current->lock_depth < 0);
128 if (likely(--current->lock_depth < 0)) 133 if (likely(--current->lock_depth < 0))
129 __unlock_kernel(); 134 __unlock_kernel();
135
136 trace_unlock_kernel(func, file, line);
130} 137}
131 138
132EXPORT_SYMBOL(_lock_kernel); 139EXPORT_SYMBOL(_lock_kernel);