author	Peter Zijlstra <peterz@infradead.org>	2009-03-04 06:32:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-04 12:49:58 -0500
commit	efed792d6738964f399a508ef9e831cd60fa4657 (patch)
tree	b4b5e472bafb3d5d0d8ea26680e1d8cc87365c30 /kernel
parent	28b1bd1cbc33cae95a309691d814399a69cf3070 (diff)
tracing: add lockdep tracepoints for lock acquire/release
Augment the traces with lock names when lockdep is available:

 1)               |  down_read_trylock() {
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   4.201 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   3.523 us    |    }
 1)               |    /* lock_acquire: try read &mm->mmap_sem */
 1) + 13.386 us   |  }
 1)   1.635 us    |  find_vma();
 1)               |  handle_mm_fault() {
 1)               |    __do_fault() {
 1)               |      filemap_fault() {
 1)               |        find_lock_page() {
 1)               |          find_get_page() {
 1)               |            /* lock_acquire: read rcu_read_lock */
 1)               |            /* lock_release: rcu_read_lock */
 1)   5.697 us    |          }
 1)   8.158 us    |        }
 1) + 11.079 us   |      }
 1)               |      _spin_lock() {
 1)               |        /* lock_acquire: __pte_lockptr(page) */
 1)   3.949 us    |      }
 1)   1.460 us    |      page_add_file_rmap();
 1)               |      _spin_unlock() {
 1)               |        /* lock_release: __pte_lockptr(page) */
 1)   3.115 us    |      }
 1)               |      unlock_page() {
 1)   1.421 us    |        page_waitqueue();
 1)   1.220 us    |        __wake_up_bit();
 1)   6.519 us    |      }
 1) + 34.328 us   |    }
 1) + 37.452 us   |  }
 1)               |  up_read() {
 1)               |    /* lock_release: &mm->mmap_sem */
 1)               |    _spin_lock_irqsave() {
 1)               |      /* lock_acquire: &sem->wait_lock */
 1)   3.865 us    |    }
 1)               |    _spin_unlock_irqrestore() {
 1)               |      /* lock_release: &sem->wait_lock */
 1)   8.562 us    |    }
 1) + 17.370 us   |  }

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Török Edwin <edwintorok@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1236166375.5330.7209.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
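Note: the tracepoints themselves are declared in a new <trace/lockdep.h> header under include/, which falls outside the 'kernel' path filter of the diffstat below; the diff only shows it being included and instantiated. A rough sketch of what such a header would declare, assuming the DECLARE_TRACE()/TPPROTO()/TPARGS() tracepoint macros of this kernel generation:

	/* Sketch of <trace/lockdep.h>; not part of the kernel/ diff shown here. */
	#ifndef _TRACE_LOCKDEP_H
	#define _TRACE_LOCKDEP_H

	#include <linux/lockdep.h>
	#include <linux/tracepoint.h>

	DECLARE_TRACE(lock_acquire,
		TPPROTO(struct lockdep_map *lock, unsigned int subclass,
			int trylock, int read, int check,
			struct lockdep_map *next_lock, unsigned long ip),
		TPARGS(lock, subclass, trylock, read, check, next_lock, ip));

	DECLARE_TRACE(lock_release,
		TPPROTO(struct lockdep_map *lock, int nested, unsigned long ip),
		TPARGS(lock, nested, ip));

	/* lock_contended and lock_acquired would be declared the same way */

	#endif /* _TRACE_LOCKDEP_H */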
Diffstat (limited to 'kernel')
 kernel/lockdep.c                    | 17 +++++++++++++++++
 kernel/trace/trace.c                | 14 ++++++++------
 kernel/trace/trace_events_stage_3.h |  4 ++--
 3 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 02014f7ccc86..cb70c1db85d0 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -2913,6 +2914,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
@@ -2923,6 +2926,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2937,11 +2942,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
 			unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, nested, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3090,10 +3099,14 @@ found_it:
 	lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3109,10 +3122,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
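Each DEFINE_TRACE() above instantiates a tracepoint that remains a no-op until a probe attaches to it. A minimal consumer sketch, assuming the register_trace_<name>() helper that DECLARE_TRACE() generates (probe and init function names here are illustrative):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <trace/lockdep.h>

	/* The probe signature must match the tracepoint's TPPROTO. */
	static void probe_lock_acquire(struct lockdep_map *lock, unsigned int subclass,
				       int trylock, int read, int check,
				       struct lockdep_map *next_lock, unsigned long ip)
	{
		/* lock->name is the string the graph tracer prints */
		pr_debug("lock_acquire: %s\n", lock->name);
	}

	static int __init lock_trace_init(void)
	{
		/* generated by DECLARE_TRACE(lock_acquire, ...) */
		return register_trace_lock_acquire(probe_lock_acquire);
	}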
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 12539f72f4a5..c8abbb0c8397 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -623,7 +623,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -735,7 +735,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!spin_trylock(&trace_cmdline_lock))
+	if (!__raw_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -753,7 +753,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	spin_unlock(&trace_cmdline_lock);
+	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
 char *trace_find_cmdline(int pid)
@@ -3751,7 +3751,7 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	static DEFINE_SPINLOCK(trace_buf_lock);
+	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
@@ -3773,7 +3773,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 		goto out;
 
 	pause_graph_tracing();
-	spin_lock_irqsave(&trace_buf_lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3792,7 +3793,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	__raw_spin_unlock(&trace_buf_lock);
+	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
  out:
 	preempt_enable_notrace();
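The lock conversions in trace.c are the other half of the patch: lock_acquire()/lock_release() now call into the tracer, so if the tracer's own internals took a lockdep-instrumented spinlock while writing an event, that lock's tracepoint would re-enter the tracer. raw_spinlock_t (the arch-level lock of this era) and raw_local_irq_save() are invisible to lockdep and the irq-tracing hooks, which breaks the recursion. A condensed sketch of the pattern, with an illustrative lock name:

	/* Lockdep-invisible locking, as used above in trace_vprintk(). */
	static raw_spinlock_t buf_lock = __RAW_SPIN_LOCK_UNLOCKED;

	static void touch_tracer_state(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* untraced: no irq-trace hooks */
		__raw_spin_lock(&buf_lock);	/* no lock_acquire tracepoint fires */

		/* ... update tracer-internal buffers ... */

		__raw_spin_unlock(&buf_lock);
		raw_local_irq_restore(flags);
	}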
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 041789ffbac1..2c8d76c7dbed 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -5,7 +5,7 @@
  *
  * static void ftrace_event_<call>(proto)
  * {
- *	event_trace_printk(_RET_IP_, "(<call>) " <fmt>);
+ *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
  * static int ftrace_reg_event_<call>(void)
@@ -112,7 +112,7 @@
 #define _TRACE_FORMAT(call, proto, args, fmt)				\
 static void ftrace_event_##call(proto)					\
 {									\
-	event_trace_printk(_RET_IP_, "(" #call ") " fmt);		\
+	event_trace_printk(_RET_IP_, #call ": " fmt);			\
 }									\
 									\
 static int ftrace_reg_event_##call(void)				\
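The trace_events_stage_3.h tweak is what produces the "lock_acquire: &sem->wait_lock" style annotations in the changelog trace: event text now reads "<call>: <args>" rather than "(<call>) <args>". For a hypothetical event named lock_acquire, the macro now expands roughly to the following (proto and fmt stand in for the macro arguments):

	static void ftrace_event_lock_acquire(proto)
	{
		/* before: event_trace_printk(_RET_IP_, "(lock_acquire) " fmt); */
		event_trace_printk(_RET_IP_, "lock_acquire: " fmt);
	}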