author     Steven Rostedt <srostedt@redhat.com>     2008-05-12 15:20:42 -0400
committer  Thomas Gleixner <tglx@linutronix.de>     2008-05-23 14:32:46 -0400
commit     81d68a96a39844853b37f20cc8282d9b65b78ef3 (patch)
tree       bbc05f415930c15fb5a1004620bd77585fcec43a /kernel/lockdep.c
parent     352ad25aa4a189c667cb2af333948d34692a2d27 (diff)
ftrace: trace irq disabled critical timings
This patch adds latency tracing for critical timings
(how long interrupts are disabled for).
"irqsoff" is added to /debugfs/tracing/available_tracers
Note:

  tracing_max_latency
    also holds the max latency for irqsoff (in usecs)
    (defaults to a large number, so one must start latency tracing
    explicitly before a meaningful maximum is recorded)

  tracing_thresh
    threshold (in usecs): always print out a trace if irqs are
    detected to be off for longer than this value.
    If tracing_thresh is non-zero, then tracing_max_latency
    is ignored.
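To make the interplay of the two knobs concrete, here is a minimal C sketch of
the reporting rule described in the note above. It is illustrative only and not
part of this patch; the helper name report_irqsoff_latency() is hypothetical.

/*
 * Sketch of the reporting rule above -- not part of this patch.
 * tracing_thresh and tracing_max_latency mirror the debugfs knobs
 * described in the note; report_irqsoff_latency() is a hypothetical
 * helper used only for illustration.
 */
static unsigned long tracing_thresh;		/* usecs, 0 == disabled   */
static unsigned long tracing_max_latency;	/* usecs, running maximum */

static int report_irqsoff_latency(unsigned long delta_us)
{
	/* A non-zero threshold overrides the max-latency tracking. */
	if (tracing_thresh)
		return delta_us >= tracing_thresh;

	/* Otherwise only a new maximum is worth reporting. */
	if (delta_us > tracing_max_latency) {
		tracing_max_latency = delta_us;
		return 1;
	}
	return 0;
}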
Here's an example of a trace with ftrace_enabled = 0
=======
preemption latency trace v1.1.5 on 2.6.24-rc7
--------------------------------------------------------------------
latency: 100 us, #3/3, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
-----------------
| task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
-----------------
=> started at: _spin_lock_irqsave+0x2a/0xb7
=> ended at: _spin_unlock_irqrestore+0x32/0x5f
_------=> CPU#
/ _-----=> irqs-off
| / _----=> need-resched
|| / _---=> hardirq/softirq
||| / _--=> preempt-depth
|||| /
||||| delay
cmd pid ||||| time | caller
\ / ||||| \ | /
swapper-0 1d.s3 0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
swapper-0 1d.s3 100us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
swapper-0 1d.s3 100us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)
vim:ft=help
=======
And this is a trace with ftrace_enabled = 1
=======
preemption latency trace v1.1.5 on 2.6.24-rc7
--------------------------------------------------------------------
latency: 102 us, #12/12, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
-----------------
| task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
-----------------
=> started at: _spin_lock_irqsave+0x2a/0xb7
=> ended at: _spin_unlock_irqrestore+0x32/0x5f
_------=> CPU#
/ _-----=> irqs-off
| / _----=> need-resched
|| / _---=> hardirq/softirq
||| / _--=> preempt-depth
|||| /
||||| delay
cmd pid ||||| time | caller
\ / ||||| \ | /
swapper-0 1dNs3 0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
swapper-0 1dNs3 46us : e1000_read_phy_reg+0x16/0x225 [e1000] (e1000_update_stats+0x5e2/0x64c [e1000])
swapper-0 1dNs3 46us : e1000_swfw_sync_acquire+0x10/0x99 [e1000] (e1000_read_phy_reg+0x49/0x225 [e1000])
swapper-0 1dNs3 46us : e1000_get_hw_eeprom_semaphore+0x12/0xa6 [e1000] (e1000_swfw_sync_acquire+0x36/0x99 [e1000])
swapper-0 1dNs3 47us : __const_udelay+0x9/0x47 (e1000_read_phy_reg+0x116/0x225 [e1000])
swapper-0 1dNs3 47us+: __delay+0x9/0x50 (__const_udelay+0x45/0x47)
swapper-0 1dNs3 97us : preempt_schedule+0xc/0x84 (__delay+0x4e/0x50)
swapper-0 1dNs3 98us : e1000_swfw_sync_release+0xc/0x55 [e1000] (e1000_read_phy_reg+0x211/0x225 [e1000])
swapper-0 1dNs3 99us+: e1000_put_hw_eeprom_semaphore+0x9/0x35 [e1000] (e1000_swfw_sync_release+0x50/0x55 [e1000])
swapper-0 1dNs3 101us : _spin_unlock_irqrestore+0xe/0x5f (e1000_update_stats+0x641/0x64c [e1000])
swapper-0 1dNs3 102us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
swapper-0 1dNs3 102us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)
vim:ft=help
=======
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 23 +++++++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a3f087..e21924365ea3 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
 
 #include <asm/sections.h>
 
@@ -982,7 +983,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1680,7 +1681,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2013,11 +2014,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void notrace trace_hardirqs_on_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 	unsigned long ip;
 
+	time_hardirqs_on(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2055,16 +2058,23 @@ void trace_hardirqs_on(void)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void notrace trace_hardirqs_on(void)
+{
+	trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void notrace trace_hardirqs_off_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 
+	time_hardirqs_off(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
@@ -2082,7 +2092,12 @@ void trace_hardirqs_off(void)
 	} else
 		debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void notrace trace_hardirqs_off(void)
+{
+	trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
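The diffstat above is limited to kernel/lockdep.c, so the time_hardirqs_on()
and time_hardirqs_off() hooks called from the new *_caller() functions are not
shown here; they are implemented by the irqsoff tracer added elsewhere in this
patch. As a rough, hedged sketch of the shape such hooks take (the static
variable names below are illustrative only, and per-CPU state, recursion
protection and the actual report path are omitted):

/*
 * Rough sketch only -- the real hooks live in the irqsoff tracer,
 * outside kernel/lockdep.c, and are considerably more involved
 * (per-CPU state, recursion guards, recording the start/stop call
 * sites).  The signatures match the calls added in the diff above.
 */
static unsigned long long irqsoff_start_ts;	/* illustrative */
static unsigned long long irqsoff_max_delta;	/* illustrative */

/* Interrupts are about to be disabled: remember when. */
void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	irqsoff_start_ts = sched_clock();
}

/* Interrupts are being re-enabled: measure how long they were off. */
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned long long delta = sched_clock() - irqsoff_start_ts;

	if (delta > irqsoff_max_delta)
		irqsoff_max_delta = delta;	/* candidate for a trace report */
}

In the real tracer the measured delta is then checked against tracing_thresh
and tracing_max_latency, as sketched earlier in the commit message, and the
recorded trace is printed when the check passes.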