aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-07-31 19:34:24 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2009-09-24 09:16:31 -0400
commit96a2c464de07d7c72988db851c029b204fc59108 (patch)
tree6e24c17c603268c097069000883b83bc51b4d112 /lib
parent0efb4d20723d58edbad29d1ff98a86b631adb5e6 (diff)
tracing/bkl: Add bkl ftrace events
Add two events lock_kernel and unlock_kernel() to trace the bkl uses. This opens the door for userspace tools to perform statistics about the callsites that use it, dependencies with other locks (by pairing the trace with lock events), use with recursivity and so on... The {__reacquire,release}_kernel_lock() events are not traced because these are called from schedule, thus the sched events are sufficient to trace them. Example of a trace: hald-addon-stor-4152 [000] 165.875501: unlock_kernel: depth: 0, fs/block_dev.c:1358 __blkdev_put() hald-addon-stor-4152 [000] 167.832974: lock_kernel: depth: 0, fs/block_dev.c:1167 __blkdev_get() How to get the callsites that acquire it recursively: cd /debug/tracing/events/bkl echo "lock_depth > 0" > filter firefox-4951 [001] 206.276967: unlock_kernel: depth: 1, fs/reiserfs/super.c:575 reiserfs_dirty_inode() You can also filter by file and/or line. v2: Use of FILTER_PTR_STRING attribute for files and lines fields to make them traceable. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Li Zefan <lizf@cn.fujitsu.com>
Diffstat (limited to 'lib')
-rw-r--r--lib/kernel_lock.c11
1 file changed, 6 insertions, 5 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..5c10b2e1fd08 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,11 @@
5 * relegated to obsolescence, but used by various less 5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems. 6 * important (or lazy) subsystems.
7 */ 7 */
8#include <linux/smp_lock.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
11#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#define CREATE_TRACE_POINTS
12#include <linux/smp_lock.h>
12 13
13/* 14/*
14 * The 'big kernel lock' 15 * The 'big kernel lock'
@@ -113,7 +114,7 @@ static inline void __unlock_kernel(void)
113 * This cannot happen asynchronously, so we only need to 114 * This cannot happen asynchronously, so we only need to
114 * worry about other CPU's. 115 * worry about other CPU's.
115 */ 116 */
116void __lockfunc lock_kernel(void) 117void __lockfunc _lock_kernel(void)
117{ 118{
118 int depth = current->lock_depth+1; 119 int depth = current->lock_depth+1;
119 if (likely(!depth)) 120 if (likely(!depth))
@@ -121,13 +122,13 @@ void __lockfunc lock_kernel(void)
121 current->lock_depth = depth; 122 current->lock_depth = depth;
122} 123}
123 124
124void __lockfunc unlock_kernel(void) 125void __lockfunc _unlock_kernel(void)
125{ 126{
126 BUG_ON(current->lock_depth < 0); 127 BUG_ON(current->lock_depth < 0);
127 if (likely(--current->lock_depth < 0)) 128 if (likely(--current->lock_depth < 0))
128 __unlock_kernel(); 129 __unlock_kernel();
129} 130}
130 131
131EXPORT_SYMBOL(lock_kernel); 132EXPORT_SYMBOL(_lock_kernel);
132EXPORT_SYMBOL(unlock_kernel); 133EXPORT_SYMBOL(_unlock_kernel);
133 134