Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 60
1 file changed, 41 insertions(+), 19 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3673a3f44d9d..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -433,13 +434,6 @@ atomic_t nr_find_usage_forwards_checks;
 atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
 atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)	atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)		do { } while (0)
-# define debug_atomic_dec(ptr)		do { } while (0)
-# define debug_atomic_read(ptr)	0
-#endif
 #endif
 
 /*
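The seven deleted lines are the debug_atomic_*() helpers that compile to real
atomic ops only under CONFIG_DEBUG_LOCKDEP; the hunk shows them leaving
lockdep.c but not their destination, which is not visible in this diff. A
minimal sketch of the pattern, using a statistics counter declared just above:

    #ifdef CONFIG_DEBUG_LOCKDEP
    # define debug_atomic_inc(ptr)	atomic_inc(ptr)		/* debug build: count */
    #else
    # define debug_atomic_inc(ptr)	do { } while (0)	/* compiles away */
    #endif

    /* callers stay unconditional: */
    debug_atomic_inc(&nr_find_usage_forwards_checks);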
@@ -799,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	class = lock_classes + nr_lock_classes++;
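The dump_stack() added here recurs in the MAX_LOCKDEP_ENTRIES,
MAX_LOCKDEP_CHAINS, MAX_LOCKDEP_SUBCLASSES and MAX_LOCK_DEPTH hunks below: each
"too low" bail-out now also prints the call chain that exhausted the static
table. A sketch of the resulting pattern (the enclosing if-condition is
paraphrased for illustration, not part of this hunk):

    if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {	/* assumed guard */
    	printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
    	printk("turning off the locking correctness validator.\n");
    	dump_stack();	/* new: identify the overflowing call path */
    	return NULL;
    }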
@@ -862,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
 
 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	return list_entries + nr_list_entries++;
@@ -1688,6 +1684,7 @@ cache_hit:
 
 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 	chain = lock_chains + nr_lock_chains++;
@@ -1900,9 +1897,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -2015,7 +2012,8 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
 			    enum lock_usage_bit bit, const char *name);
 
 static int
-mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+	      enum lock_usage_bit new_bit)
 {
 	int excl_bit = exclusive_bit(new_bit);
 	int read = new_bit & 1;
@@ -2043,7 +2041,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
 	 * states.
 	 */
 	if ((!read || !dir || STRICT_READ_CHECKS) &&
-			!usage(curr, this, excl_bit, state_name(new_bit)))
+			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
 		return 0;
 
 	/*
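The "& 1" / "& ~1" arithmetic above relies on lockdep packing each usage state
as a write/read pair, with bit 0 flagging the read variant; masking bit 0 off
yields the value state_name() expects. A sketch of that assumption, with
hypothetical helper names (the real enum lives in the lockdep headers):

    /* hypothetical helpers illustrating the assumed bit layout */
    static inline int usage_is_read(enum lock_usage_bit bit)
    {
    	return bit & 1;		/* odd values are the _READ variants */
    }

    static inline enum lock_usage_bit usage_state(enum lock_usage_bit bit)
    {
    	return bit & ~1;	/* strip the read bit for state_name() */
    }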
@@ -2492,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2507,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
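Taken together, the two lockdep_init_map() hunks reorder initialization so that
lock->class_cache, lock->cpu and lock->name are set before any early return,
including the !debug_locks one, which now happens last; a map passed a NULL
name gets the literal "NULL" instead of staying uninitialized. A hypothetical
caller, to show what is now guaranteed:

    /* sketch: even if debug_locks is already off, the map comes back
     * with name/class_cache (and cpu under CONFIG_LOCK_STAT) filled in */
    static struct lock_class_key example_key;
    struct lockdep_map example_map;

    lockdep_init_map(&example_map, "example_lock", &example_key, 0);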
@@ -2546,6 +2550,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
@@ -2642,6 +2647,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCK_DEPTH too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
@@ -2929,6 +2935,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
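DEFINE_TRACE(lock_acquire) emits the tracepoint definition that pairs with a
DECLARE_TRACE() in a header, presumably the <trace/lockdep.h> added at the top
of this patch; the same pattern repeats for lock_release, lock_contended and
lock_acquired below. A sketch of the two-part pattern, assuming the
TPPROTO/TPARGS spelling used by tracepoints of this era (the header contents
are not shown in this diff):

    /* in the header (e.g. trace/lockdep.h): */
    DECLARE_TRACE(lock_acquire,
    	TPPROTO(struct lockdep_map *lock, unsigned int subclass,
    		int trylock, int read, int check,
    		struct lockdep_map *nest_lock, unsigned long ip),
    	TPARGS(lock, subclass, trylock, read, check, nest_lock, ip));

    /* in exactly one .c file (this one): */
    DEFINE_TRACE(lock_acquire);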
@@ -2939,6 +2947,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2953,11 +2963,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
 			unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, nested, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3106,10 +3120,14 @@ found_it:
 	lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3125,10 +3143,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
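With the four DEFINE_TRACE()/trace_*() pairs in place, a consumer can attach a
probe via the register_trace_<name>() helper that DECLARE_TRACE() generates. A
hedged sketch of a hypothetical probe counting contention events, assuming the
probe-only registration signature of this tracepoint generation:

    static atomic_t contention_count = ATOMIC_INIT(0);

    /* probe signature mirrors trace_lock_contended(lock, ip) above */
    static void probe_lock_contended(struct lockdep_map *lock, unsigned long ip)
    {
    	atomic_inc(&contention_count);
    }

    static int __init contention_probe_init(void)
    {
    	return register_trace_lock_contended(probe_lock_contended);
    }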