aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-09-28 11:12:49 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2009-09-28 12:00:48 -0400
commit925936ebf35a95c290e010b784c962164e6728f3 (patch)
treee0bae3a2655f84dcf238c9acacd158e5ce422b32
parent3f6fe06dbf67b46d36fedec502300e04dffeb67a (diff)
tracing: Pushdown the bkl tracepoints calls
Currently we are calling the bkl tracepoint callbacks just before the bkl lock/unlock operations, ie the tracepoint call is not inside a lock_kernel() function but inside a lock_kernel() macro. Hence the bkl trace event header must be included from smp_lock.h. This raises some nasty circular header dependencies: linux/smp_lock.h -> trace/events/bkl.h -> trace/define_trace.h -> trace/ftrace.h -> linux/ftrace_event.h -> linux/hardirq.h -> linux/smp_lock.h This results in incomplete event declarations, spurious event definitions and other kind of funny behaviours. This is hardly fixable without ugly workarounds. So instead, we push the file name, line number and function name as lock_kernel() parameters, so that we only deal with the trace event header from lib/kernel_lock.c This adds two parameters to lock_kernel() and unlock_kernel() but it should be fine wrt performance because this pair does not seem to be called in fast paths. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Li Zefan <lizf@cn.fujitsu.com>
-rw-r--r--include/linux/smp_lock.h28
-rw-r--r--lib/kernel_lock.c15
2 files changed, 26 insertions, 17 deletions
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index d48cc77ba70d..2ea1dd1ba21c 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -3,7 +3,6 @@
3 3
4#ifdef CONFIG_LOCK_KERNEL 4#ifdef CONFIG_LOCK_KERNEL
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <trace/events/bkl.h>
7 6
8#define kernel_locked() (current->lock_depth >= 0) 7#define kernel_locked() (current->lock_depth >= 0)
9 8
@@ -25,18 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
25 return 0; 24 return 0;
26} 25}
27 26
28extern void __lockfunc _lock_kernel(void) __acquires(kernel_lock); 27extern void __lockfunc
29extern void __lockfunc _unlock_kernel(void) __releases(kernel_lock); 28_lock_kernel(const char *func, const char *file, int line)
29__acquires(kernel_lock);
30 30
31#define lock_kernel() { \ 31extern void __lockfunc
32 trace_lock_kernel(__func__, __FILE__, __LINE__); \ 32_unlock_kernel(const char *func, const char *file, int line)
33 _lock_kernel(); \ 33__releases(kernel_lock);
34}
35 34
36#define unlock_kernel() { \ 35#define lock_kernel() do { \
37 trace_unlock_kernel(__func__, __FILE__, __LINE__); \ 36 _lock_kernel(__func__, __FILE__, __LINE__); \
38 _unlock_kernel(); \ 37} while (0)
39} 38
39#define unlock_kernel() do { \
40 _unlock_kernel(__func__, __FILE__, __LINE__); \
41} while (0)
40 42
41/* 43/*
42 * Various legacy drivers don't really need the BKL in a specific 44 * Various legacy drivers don't really need the BKL in a specific
@@ -52,8 +54,8 @@ static inline void cycle_kernel_lock(void)
52 54
53#else 55#else
54 56
55#define lock_kernel() trace_lock_kernel(__func__, __FILE__, __LINE__); 57#define lock_kernel()
56#define unlock_kernel() trace_unlock_kernel(__func__, __FILE__, __LINE__); 58#define unlock_kernel()
57#define release_kernel_lock(task) do { } while(0) 59#define release_kernel_lock(task) do { } while(0)
58#define cycle_kernel_lock() do { } while(0) 60#define cycle_kernel_lock() do { } while(0)
59#define reacquire_kernel_lock(task) 0 61#define reacquire_kernel_lock(task) 0
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5c10b2e1fd08..4ebfa5a164d7 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,9 +8,11 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
10#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#define CREATE_TRACE_POINTS
12#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
13 12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
15
14/* 16/*
15 * The 'big kernel lock' 17 * The 'big kernel lock'
16 * 18 *
@@ -114,19 +116,24 @@ static inline void __unlock_kernel(void)
114 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
115 * worry about other CPU's. 117 * worry about other CPU's.
116 */ 118 */
117void __lockfunc _lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
118{ 120{
119 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
122
123 trace_lock_kernel(func, file, line);
124
120 if (likely(!depth)) 125 if (likely(!depth))
121 __lock_kernel(); 126 __lock_kernel();
122 current->lock_depth = depth; 127 current->lock_depth = depth;
123} 128}
124 129
125void __lockfunc _unlock_kernel(void) 130void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
126{ 131{
127 BUG_ON(current->lock_depth < 0); 132 BUG_ON(current->lock_depth < 0);
128 if (likely(--current->lock_depth < 0)) 133 if (likely(--current->lock_depth < 0))
129 __unlock_kernel(); 134 __unlock_kernel();
135
136 trace_unlock_kernel(func, file, line);
130} 137}
131 138
132EXPORT_SYMBOL(_lock_kernel); 139EXPORT_SYMBOL(_lock_kernel);