path: root/kernel/lockdep.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-14 17:55:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-14 17:55:13 -0400
commit	40e7babbb52b4b57721b9175aed7a14d93bf242f (patch)
tree	e16bc0a698c891922ca4c0166e0e08ac194718ed /kernel/lockdep.c
parent	948769a5ba304ed3329a2f42ee3561f04a0b5692 (diff)
parent	d12c1a37925a8ec386994169605fe99217295199 (diff)
Merge branch 'core/locking' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core/locking' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix kernel/fork.c warning
  lockdep: fix ftrace irq tracing false positive
  lockdep: remove duplicate definition of STATIC_LOCKDEP_MAP_INIT
  lockdep: add lock_class information to lock_chain and output it
  lockdep: add lock_class information to lock_chain and output it
  lockdep: output lock_class key instead of address for forward dependency output
  __mutex_lock_common: use signal_pending_state()
  mutex-debug: check mutex magic before owner

Fixed up conflict in kernel/fork.c manually
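The "__mutex_lock_common: use signal_pending_state()" change listed above is not part of the lockdep.c diff shown below (the diffstat is limited to kernel/lockdep.c), but the idea is simple: one helper call decides whether a sleeping mutex waiter should back out, instead of open-coding separate interruptible/killable checks. A minimal sketch of that pattern, assuming a hypothetical wrapper name (should_back_out is illustrative, not the actual mutex.c hunk):

#include <linux/sched.h>	/* signal_pending_state(), TASK_* sleep states */

/*
 * Hedged sketch: signal_pending_state() returns non-zero only when the
 * sleep state allows interruption (TASK_INTERRUPTIBLE or TASK_WAKEKILL)
 * and a matching signal is pending; the real __mutex_lock_common() then
 * removes the waiter and returns -EINTR.
 */
static int should_back_out(long state, struct task_struct *task)
{
	return signal_pending_state(state, task);
}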
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	47
1 file changed, 43 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 65548eff029e..d38a64362973 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1462,7 +1462,14 @@ out_bug:
 }
 
 unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+int nr_chain_hlocks;
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
+{
+	return lock_classes + chain_hlocks[chain->base + i];
+}
 
 /*
  * Look up a dependency chain. If the key is not present yet then
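This hunk drops `static` from lock_chains and adds lock_chain_get_class(), so code outside lookup_chain_cache() can recover which lock classes make up a cached chain. A hedged sketch of how a reader such as the companion /proc output code might walk the table with the new helper (the function name and print format here are illustrative, not the exact kernel/lockdep_proc.c change from this series; lockdep_internals.h is assumed to expose the chain table, which is why `static` is removed above):

#include <linux/seq_file.h>
#include "lockdep_internals.h"	/* assumed to declare lock_chains, nr_lock_chains */

/* Illustrative sketch: dump every cached chain and its recorded classes. */
static void print_lock_chains(struct seq_file *m)
{
	unsigned long i;
	int j;

	for (i = 0; i < nr_lock_chains; i++) {
		struct lock_chain *chain = lock_chains + i;

		seq_printf(m, "irq_context: %d, depth: %d\n",
			   chain->irq_context, chain->depth);
		for (j = 0; j < chain->depth; j++) {
			struct lock_class *class = lock_chain_get_class(chain, j);

			seq_printf(m, "  %s\n", class->name ? class->name : "?");
		}
	}
}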
@@ -1470,10 +1477,15 @@ static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
  * validated. If the key is already hashed, return 0.
  * (On return with 1 graph_lock is held.)
  */
-static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
+static inline int lookup_chain_cache(struct task_struct *curr,
+				     struct held_lock *hlock,
+				     u64 chain_key)
 {
+	struct lock_class *class = hlock->class;
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
+	struct held_lock *hlock_curr, *hlock_next;
+	int i, j, n, cn;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
@@ -1521,6 +1533,32 @@ cache_hit:
 	}
 	chain = lock_chains + nr_lock_chains++;
 	chain->chain_key = chain_key;
+	chain->irq_context = hlock->irq_context;
+	/* Find the first held_lock of current chain */
+	hlock_next = hlock;
+	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+		hlock_curr = curr->held_locks + i;
+		if (hlock_curr->irq_context != hlock_next->irq_context)
+			break;
+		hlock_next = hlock;
+	}
+	i++;
+	chain->depth = curr->lockdep_depth + 1 - i;
+	cn = nr_chain_hlocks;
+	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+		if (n == cn)
+			break;
+		cn = n;
+	}
+	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+		chain->base = cn;
+		for (j = 0; j < chain->depth - 1; j++, i++) {
+			int lock_id = curr->held_locks[i].class - lock_classes;
+			chain_hlocks[chain->base + j] = lock_id;
+		}
+		chain_hlocks[chain->base + j] = class - lock_classes;
+	}
 	list_add_tail_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(&chain_lookup_misses);
 	inc_chains();
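The cmpxchg() loop in this hunk reserves chain->depth consecutive slots in chain_hlocks[] without an extra lock: each contender proposes bumping nr_chain_hlocks, and on a lost race retries with the value it observed, until it either wins or the table would overflow. A minimal userspace model of that reservation pattern, using C11 atomics in place of the kernel's cmpxchg() (the names and table size are illustrative, not taken from lockdep):

#include <stdatomic.h>
#include <stdio.h>

#define TABLE_SIZE 64		/* stand-in for MAX_LOCKDEP_CHAIN_HLOCKS */

static atomic_int next_free;	/* stand-in for nr_chain_hlocks */

/*
 * Try to reserve 'depth' consecutive slots.  Returns the base index on
 * success, or -1 when the table would overflow, mirroring the
 * cmpxchg()-until-stable loop in lookup_chain_cache().
 */
static int reserve_slots(int depth)
{
	int cn = atomic_load(&next_free);

	while (cn + depth <= TABLE_SIZE) {
		/* If nobody raced with us, bump the cursor and win. */
		if (atomic_compare_exchange_weak(&next_free, &cn, cn + depth))
			return cn;
		/* On failure 'cn' was reloaded with the current value; retry. */
	}
	return -1;
}

int main(void)
{
	printf("first  reservation at %d\n", reserve_slots(5));
	printf("second reservation at %d\n", reserve_slots(5));
	return 0;
}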
@@ -1542,7 +1580,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 * graph_lock for us)
 	 */
 	if (!hlock->trylock && (hlock->check == 2) &&
-	    lookup_chain_cache(chain_key, hlock->class)) {
+	    lookup_chain_cache(curr, hlock, chain_key)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -2668,7 +2706,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+    defined(CONFIG_TRACE_IRQFLAGS)
 	if (!debug_locks)
 		return;
 