Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 80 ++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 69 insertions(+), 11 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a3f087..d38a64362973 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
 
 #include <asm/sections.h>
 
@@ -81,6 +82,8 @@ static int graph_lock(void)
                 __raw_spin_unlock(&lockdep_lock);
                 return 0;
         }
+        /* prevent any recursions within lockdep from causing deadlocks */
+        current->lockdep_recursion++;
         return 1;
 }
 
@@ -89,6 +92,7 @@ static inline int graph_unlock(void)
         if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
                 return DEBUG_LOCKS_WARN_ON(1);
 
+        current->lockdep_recursion--;
         __raw_spin_unlock(&lockdep_lock);
         return 0;
 }
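
The two hunks above hold current->lockdep_recursion elevated for as long as the graph lock is held, so any hook that fires while lockdep itself is running (including the trace_hardirqs_* entry points changed below) sees a non-zero counter and bails out instead of re-entering lockdep and deadlocking on lockdep_lock. A minimal standalone sketch of this guard pattern, with a plain global standing in for the per-task field (names here are illustrative, not kernel API):

/* Re-entrancy guard: a sketch assuming a single context for
 * simplicity; lockdep keeps the counter per task instead. */
static int recursion;

static void traced_hook(void)
{
        if (recursion)          /* already inside the subsystem */
                return;         /* refuse to re-enter */
        /* ... real work would go here ... */
}

static void subsystem_enter(void)
{
        recursion++;            /* traced_hook() is now a no-op */
        /* ... internal work that may trigger traced_hook() ... */
}

static void subsystem_exit(void)
{
        recursion--;            /* hooks are live again */
}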
@@ -982,7 +986,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
         return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1458,7 +1462,14 @@ out_bug:
 }
 
 unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+int nr_chain_hlocks;
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
+{
+        return lock_classes + chain_hlocks[chain->base + i];
+}
 
 /*
  * Look up a dependency chain. If the key is not present yet then
@@ -1466,10 +1477,15 @@ static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
  * validated. If the key is already hashed, return 0.
  * (On return with 1 graph_lock is held.)
  */
-static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
+static inline int lookup_chain_cache(struct task_struct *curr,
+                                     struct held_lock *hlock,
+                                     u64 chain_key)
 {
+        struct lock_class *class = hlock->class;
         struct list_head *hash_head = chainhashentry(chain_key);
         struct lock_chain *chain;
+        struct held_lock *hlock_curr, *hlock_next;
+        int i, j, n, cn;
 
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                 return 0;
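
With this change a cached chain no longer stores lock classes directly: chain_hlocks[] is one flat u16 array shared by all chains, each chain claims a slice of it described by chain->base and chain->depth, and lock_chain_get_class() (added above) turns a stored class index back into a pointer by offsetting into lock_classes[]. A reduced userspace sketch of the same layout, with hypothetical sizes and names rather than the kernel's:

#include <stdio.h>

#define MAX_CLASSES 8
#define MAX_HLOCKS 64

struct lock_class { const char *name; };
struct lock_chain { int base, depth; };

static struct lock_class lock_classes[MAX_CLASSES] = {
        { "A" }, { "B" }, { "C" },
};
static unsigned short chain_hlocks[MAX_HLOCKS];

/* Same idea as lock_chain_get_class(): the stored u16 is an index
 * into lock_classes[], so adding it to the array base recovers the
 * class pointer. */
static struct lock_class *chain_get_class(struct lock_chain *chain, int i)
{
        return lock_classes + chain_hlocks[chain->base + i];
}

int main(void)
{
        struct lock_chain chain = { .base = 0, .depth = 3 };
        int i;

        chain_hlocks[0] = 0;    /* class "A" */
        chain_hlocks[1] = 2;    /* class "C" */
        chain_hlocks[2] = 1;    /* class "B" */

        for (i = 0; i < chain.depth; i++)
                printf("%s ", chain_get_class(&chain, i)->name);
        printf("\n");           /* prints: A C B */
        return 0;
}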
@@ -1517,6 +1533,32 @@ cache_hit:
         }
         chain = lock_chains + nr_lock_chains++;
         chain->chain_key = chain_key;
+        chain->irq_context = hlock->irq_context;
+        /* Find the first held_lock of current chain */
+        hlock_next = hlock;
+        for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+                hlock_curr = curr->held_locks + i;
+                if (hlock_curr->irq_context != hlock_next->irq_context)
+                        break;
+                hlock_next = hlock;
+        }
+        i++;
+        chain->depth = curr->lockdep_depth + 1 - i;
+        cn = nr_chain_hlocks;
+        while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+                n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+                if (n == cn)
+                        break;
+                cn = n;
+        }
+        if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+                chain->base = cn;
+                for (j = 0; j < chain->depth - 1; j++, i++) {
+                        int lock_id = curr->held_locks[i].class - lock_classes;
+                        chain_hlocks[chain->base + j] = lock_id;
+                }
+                chain_hlocks[chain->base + j] = class - lock_classes;
+        }
         list_add_tail_rcu(&chain->entry, hash_head);
         debug_atomic_inc(&chain_lookup_misses);
         inc_chains();
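
The cmpxchg() loop above reserves chain->depth consecutive slots in chain_hlocks[] without taking a further lock: each contender re-reads nr_chain_hlocks until its compare-and-swap lands, and the per-chain lock record is simply skipped if the table would overflow. The same reservation idiom in portable C11 atomics, as a sketch rather than the kernel primitive:

#include <stdatomic.h>

#define MAX_SLOTS 1024

static atomic_int nr_slots_used;

/* Reserve `depth` consecutive slots; returns the base index, or -1
 * if the table would overflow. Mirrors the loop above: retry with
 * the freshly observed counter until our compare-and-swap wins. */
static int reserve_slots(int depth)
{
        int cn = atomic_load(&nr_slots_used);

        while (cn + depth <= MAX_SLOTS) {
                /* On failure, cn is reloaded with the current value. */
                if (atomic_compare_exchange_weak(&nr_slots_used, &cn,
                                                 cn + depth))
                        return cn;
        }
        return -1;
}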
@@ -1538,7 +1580,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
          * graph_lock for us)
          */
         if (!hlock->trylock && (hlock->check == 2) &&
-                        lookup_chain_cache(chain_key, hlock->class)) {
+            lookup_chain_cache(curr, hlock, chain_key)) {
                 /*
                  * Check whether last held lock:
                  *
@@ -1680,7 +1722,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2013,11 +2055,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void trace_hardirqs_on_caller(unsigned long a0)
 {
         struct task_struct *curr = current;
         unsigned long ip;
 
+        time_hardirqs_on(CALLER_ADDR0, a0);
+
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
@@ -2055,16 +2099,23 @@ void trace_hardirqs_on(void)
         curr->hardirq_enable_event = ++curr->irq_events;
         debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void trace_hardirqs_on(void)
+{
+        trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void trace_hardirqs_off_caller(unsigned long a0)
 {
         struct task_struct *curr = current;
 
+        time_hardirqs_off(CALLER_ADDR0, a0);
+
         if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
@@ -2082,7 +2133,12 @@ void trace_hardirqs_off(void)
         } else
                 debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void trace_hardirqs_off(void)
+{
+        trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
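
The pattern in the last three hunks is the usual way to hand a tracer the real call site: trace_hardirqs_on() and trace_hardirqs_off() become thin wrappers that capture their own return address with CALLER_ADDR0 (built on __builtin_return_address(0)) and pass it to the new _caller variants, so time_hardirqs_on()/time_hardirqs_off() can attribute the event to the code that actually toggled interrupts rather than to the tracing glue. A GCC-only userspace sketch of the same wrapper pattern, with illustrative names:

#include <stdio.h>

/* Address our caller will return to, i.e. the call site. */
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

/* The _caller variant takes the interesting address explicitly, so
 * it works whether called directly or through the wrapper. */
static void __attribute__((noinline)) event_on_caller(unsigned long ip)
{
        printf("event fired from %#lx\n", ip);
}

/* Old entry point, now a wrapper: its own return address is the
 * original call site. Inlining would break that, hence noinline. */
static void __attribute__((noinline)) event_on(void)
{
        event_on_caller(CALLER_ADDR0);
}

int main(void)
{
        event_on();             /* reports the address of this call site */
        return 0;
}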
@@ -2246,7 +2302,7 @@ static inline int separate_irq_context(struct task_struct *curr,
  * Mark a lock with a usage bit, and validate the state transition:
  */
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
-                     enum lock_usage_bit new_bit)
+                            enum lock_usage_bit new_bit)
 {
         unsigned int new_mask = 1 << new_bit, ret = 1;
 
@@ -2650,7 +2706,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+    defined(CONFIG_TRACE_IRQFLAGS)
         if (!debug_locks)
                 return;
 
@@ -2686,7 +2743,7 @@ static void check_flags(unsigned long flags)
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                  int trylock, int read, int check, unsigned long ip)
+                          int trylock, int read, int check, unsigned long ip)
 {
         unsigned long flags;
 
@@ -2708,7 +2765,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested,
+                  unsigned long ip)
 {
         unsigned long flags;
 