diff options
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r-- | kernel/lockdep.c | 38 |
1 file changed, 27 insertions, 11 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 3673a3f44d9d..b0f011866969 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/hash.h> | 42 | #include <linux/hash.h> |
43 | #include <linux/ftrace.h> | 43 | #include <linux/ftrace.h> |
44 | #include <linux/stringify.h> | 44 | #include <linux/stringify.h> |
45 | #include <trace/lockdep.h> | ||
45 | 46 | ||
46 | #include <asm/sections.h> | 47 | #include <asm/sections.h> |
47 | 48 | ||
@@ -433,13 +434,6 @@ atomic_t nr_find_usage_forwards_checks; | |||
433 | atomic_t nr_find_usage_forwards_recursions; | 434 | atomic_t nr_find_usage_forwards_recursions; |
434 | atomic_t nr_find_usage_backwards_checks; | 435 | atomic_t nr_find_usage_backwards_checks; |
435 | atomic_t nr_find_usage_backwards_recursions; | 436 | atomic_t nr_find_usage_backwards_recursions; |
436 | # define debug_atomic_inc(ptr) atomic_inc(ptr) | ||
437 | # define debug_atomic_dec(ptr) atomic_dec(ptr) | ||
438 | # define debug_atomic_read(ptr) atomic_read(ptr) | ||
439 | #else | ||
440 | # define debug_atomic_inc(ptr) do { } while (0) | ||
441 | # define debug_atomic_dec(ptr) do { } while (0) | ||
442 | # define debug_atomic_read(ptr) 0 | ||
443 | #endif | 437 | #endif |
444 | 438 | ||
445 | /* | 439 | /* |
@@ -799,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
799 | 793 | ||
800 | printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); | 794 | printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); |
801 | printk("turning off the locking correctness validator.\n"); | 795 | printk("turning off the locking correctness validator.\n"); |
796 | dump_stack(); | ||
802 | return NULL; | 797 | return NULL; |
803 | } | 798 | } |
804 | class = lock_classes + nr_lock_classes++; | 799 | class = lock_classes + nr_lock_classes++; |
@@ -862,6 +857,7 @@ static struct lock_list *alloc_list_entry(void) | |||
862 | 857 | ||
863 | printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); | 858 | printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); |
864 | printk("turning off the locking correctness validator.\n"); | 859 | printk("turning off the locking correctness validator.\n"); |
860 | dump_stack(); | ||
865 | return NULL; | 861 | return NULL; |
866 | } | 862 | } |
867 | return list_entries + nr_list_entries++; | 863 | return list_entries + nr_list_entries++; |
@@ -1688,6 +1684,7 @@ cache_hit: | |||
1688 | 1684 | ||
1689 | printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); | 1685 | printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); |
1690 | printk("turning off the locking correctness validator.\n"); | 1686 | printk("turning off the locking correctness validator.\n"); |
1687 | dump_stack(); | ||
1691 | return 0; | 1688 | return 0; |
1692 | } | 1689 | } |
1693 | chain = lock_chains + nr_lock_chains++; | 1690 | chain = lock_chains + nr_lock_chains++; |
@@ -1900,9 +1897,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | |||
1900 | curr->comm, task_pid_nr(curr)); | 1897 | curr->comm, task_pid_nr(curr)); |
1901 | print_lock(this); | 1898 | print_lock(this); |
1902 | if (forwards) | 1899 | if (forwards) |
1903 | printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); | 1900 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); |
1904 | else | 1901 | else |
1905 | printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); | 1902 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); |
1906 | print_lock_name(other); | 1903 | print_lock_name(other); |
1907 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 1904 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
1908 | 1905 | ||
@@ -2015,7 +2012,8 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, | |||
2015 | enum lock_usage_bit bit, const char *name); | 2012 | enum lock_usage_bit bit, const char *name); |
2016 | 2013 | ||
2017 | static int | 2014 | static int |
2018 | mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) | 2015 | mark_lock_irq(struct task_struct *curr, struct held_lock *this, |
2016 | enum lock_usage_bit new_bit) | ||
2019 | { | 2017 | { |
2020 | int excl_bit = exclusive_bit(new_bit); | 2018 | int excl_bit = exclusive_bit(new_bit); |
2021 | int read = new_bit & 1; | 2019 | int read = new_bit & 1; |
@@ -2043,7 +2041,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit) | |||
2043 | * states. | 2041 | * states. |
2044 | */ | 2042 | */ |
2045 | if ((!read || !dir || STRICT_READ_CHECKS) && | 2043 | if ((!read || !dir || STRICT_READ_CHECKS) && |
2046 | !usage(curr, this, excl_bit, state_name(new_bit))) | 2044 | !usage(curr, this, excl_bit, state_name(new_bit & ~1))) |
2047 | return 0; | 2045 | return 0; |
2048 | 2046 | ||
2049 | /* | 2047 | /* |
@@ -2546,6 +2544,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2546 | debug_locks_off(); | 2544 | debug_locks_off(); |
2547 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); | 2545 | printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); |
2548 | printk("turning off the locking correctness validator.\n"); | 2546 | printk("turning off the locking correctness validator.\n"); |
2547 | dump_stack(); | ||
2549 | return 0; | 2548 | return 0; |
2550 | } | 2549 | } |
2551 | 2550 | ||
@@ -2642,6 +2641,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2642 | debug_locks_off(); | 2641 | debug_locks_off(); |
2643 | printk("BUG: MAX_LOCK_DEPTH too low!\n"); | 2642 | printk("BUG: MAX_LOCK_DEPTH too low!\n"); |
2644 | printk("turning off the locking correctness validator.\n"); | 2643 | printk("turning off the locking correctness validator.\n"); |
2644 | dump_stack(); | ||
2645 | return 0; | 2645 | return 0; |
2646 | } | 2646 | } |
2647 | 2647 | ||
@@ -2929,6 +2929,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name, | |||
2929 | } | 2929 | } |
2930 | EXPORT_SYMBOL_GPL(lock_set_class); | 2930 | EXPORT_SYMBOL_GPL(lock_set_class); |
2931 | 2931 | ||
2932 | DEFINE_TRACE(lock_acquire); | ||
2933 | |||
2932 | /* | 2934 | /* |
2933 | * We are not always called with irqs disabled - do that here, | 2935 | * We are not always called with irqs disabled - do that here, |
2934 | * and also avoid lockdep recursion: | 2936 | * and also avoid lockdep recursion: |
@@ -2939,6 +2941,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2939 | { | 2941 | { |
2940 | unsigned long flags; | 2942 | unsigned long flags; |
2941 | 2943 | ||
2944 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | ||
2945 | |||
2942 | if (unlikely(current->lockdep_recursion)) | 2946 | if (unlikely(current->lockdep_recursion)) |
2943 | return; | 2947 | return; |
2944 | 2948 | ||
@@ -2953,11 +2957,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2953 | } | 2957 | } |
2954 | EXPORT_SYMBOL_GPL(lock_acquire); | 2958 | EXPORT_SYMBOL_GPL(lock_acquire); |
2955 | 2959 | ||
2960 | DEFINE_TRACE(lock_release); | ||
2961 | |||
2956 | void lock_release(struct lockdep_map *lock, int nested, | 2962 | void lock_release(struct lockdep_map *lock, int nested, |
2957 | unsigned long ip) | 2963 | unsigned long ip) |
2958 | { | 2964 | { |
2959 | unsigned long flags; | 2965 | unsigned long flags; |
2960 | 2966 | ||
2967 | trace_lock_release(lock, nested, ip); | ||
2968 | |||
2961 | if (unlikely(current->lockdep_recursion)) | 2969 | if (unlikely(current->lockdep_recursion)) |
2962 | return; | 2970 | return; |
2963 | 2971 | ||
@@ -3106,10 +3114,14 @@ found_it: | |||
3106 | lock->ip = ip; | 3114 | lock->ip = ip; |
3107 | } | 3115 | } |
3108 | 3116 | ||
3117 | DEFINE_TRACE(lock_contended); | ||
3118 | |||
3109 | void lock_contended(struct lockdep_map *lock, unsigned long ip) | 3119 | void lock_contended(struct lockdep_map *lock, unsigned long ip) |
3110 | { | 3120 | { |
3111 | unsigned long flags; | 3121 | unsigned long flags; |
3112 | 3122 | ||
3123 | trace_lock_contended(lock, ip); | ||
3124 | |||
3113 | if (unlikely(!lock_stat)) | 3125 | if (unlikely(!lock_stat)) |
3114 | return; | 3126 | return; |
3115 | 3127 | ||
@@ -3125,10 +3137,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3125 | } | 3137 | } |
3126 | EXPORT_SYMBOL_GPL(lock_contended); | 3138 | EXPORT_SYMBOL_GPL(lock_contended); |
3127 | 3139 | ||
3140 | DEFINE_TRACE(lock_acquired); | ||
3141 | |||
3128 | void lock_acquired(struct lockdep_map *lock, unsigned long ip) | 3142 | void lock_acquired(struct lockdep_map *lock, unsigned long ip) |
3129 | { | 3143 | { |
3130 | unsigned long flags; | 3144 | unsigned long flags; |
3131 | 3145 | ||
3146 | trace_lock_acquired(lock, ip); | ||
3147 | |||
3132 | if (unlikely(!lock_stat)) | 3148 | if (unlikely(!lock_stat)) |
3133 | return; | 3149 | return; |
3134 | 3150 | ||