Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 23 +++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a3f087..e21924365ea3 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
 
 #include <asm/sections.h>
 
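The newly included <linux/ftrace.h> is presumably what declares the time_hardirqs_on()/time_hardirqs_off() hooks used further down. A minimal sketch of those declarations, assuming the usual stub-when-disabled pattern (the CONFIG_IRQSOFF_TRACER guard is an assumption, not shown in this diff):

	/* assumed shape of the hooks in <linux/ftrace.h> */
	#ifdef CONFIG_IRQSOFF_TRACER
	  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
	  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
	#else
	# define time_hardirqs_on(a0, a1)	do { } while (0)
	# define time_hardirqs_off(a0, a1)	do { } while (0)
	#endif

With the stubs expanding to nothing, the calls below cost nothing on kernels where the irqs-off tracer is not configured.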
@@ -982,7 +983,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1680,7 +1681,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
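The two guard changes above mean the forwards/backwards subgraph search and the irq-inversion reporting now build only when CONFIG_PROVE_LOCKING is set alongside CONFIG_TRACE_IRQFLAGS; presumably this lets a tracer select CONFIG_TRACE_IRQFLAGS on its own without dragging in lockdep's full proving machinery.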
@@ -2013,11 +2014,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void notrace trace_hardirqs_on_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 	unsigned long ip;
 
+	time_hardirqs_on(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
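Note the notrace marker on the renamed function (and on the wrappers below): these paths can now be called back into by the tracer itself, and letting the function tracer instrument them would risk recursion. For reference, a sketch of the attribute as the kernel's compiler headers define it (not part of this diff):

	/* from <linux/compiler.h>: suppress profiling/mcount instrumentation */
	#define notrace __attribute__((no_instrument_function))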
@@ -2055,16 +2058,23 @@ void trace_hardirqs_on(void)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void notrace trace_hardirqs_on(void)
+{
+	trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void notrace trace_hardirqs_off_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 
+	time_hardirqs_off(CALLER_ADDR0, a0);
+
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
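The _caller/wrapper split exists so the tracer sees the real call site: inside trace_hardirqs_on_caller(), CALLER_ADDR0 is the immediate return address (into the wrapper, or into a direct caller), while a0 carries the address one frame further up. A sketch of the macro, assumed from <linux/ftrace.h>:

	/* return address of the current function */
	#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

So a call chain like local_irq_enable() -> trace_hardirqs_on() -> trace_hardirqs_on_caller() hands time_hardirqs_on() both an address inside the wrapper and the address of the site that actually enabled interrupts.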
@@ -2082,7 +2092,12 @@ void trace_hardirqs_off(void)
 	} else
 		debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void notrace trace_hardirqs_off(void)
+{
+	trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*