author     Ingo Molnar <mingo@elte.hu>    2009-04-01 15:54:19 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-04-01 18:49:02 -0400
commit     8302294f43250dc337108c51882a6007f2b1e2e0 (patch)
tree       85acd4440799c46a372df9cad170fa0c21e59096 /kernel/lockdep.c
parent     4fe70410d9a219dabb47328effccae7e7f2a6e26 (diff)
parent     2e572895bf3203e881356a4039ab0fa428ed2639 (diff)
Merge branch 'tracing/core-v2' into tracing-for-linus
Conflicts:
include/linux/slub_def.h
lib/Kconfig.debug
mm/slob.c
mm/slub.c
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--   kernel/lockdep.c   33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3673a3f44d9d..81b5f33970b8 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
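Note: the new <trace/lockdep.h> header is what declares the lock_acquire/lock_release/lock_contended/lock_acquired tracepoints whose DEFINE_TRACE() instances and trace_*() calls appear in the hunks below. A minimal sketch of what such a header looks like, assuming the standard DECLARE_TRACE()/TP_PROTO()/TP_ARGS() tracepoint pattern of this kernel generation (not the verbatim file):

/* Sketch only: tracepoint declarations matching the hook call sites below. */
#include <linux/lockdep.h>
#include <linux/tracepoint.h>

DECLARE_TRACE(lock_acquire,
	TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
		 int trylock, int read, int check,
		 struct lockdep_map *next_lock, unsigned long ip),
	TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip));

DECLARE_TRACE(lock_release,
	TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
	TP_ARGS(lock, nested, ip));

The lock_contended and lock_acquired declarations would follow the same pattern with (struct lockdep_map *lock, unsigned long ip).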
@@ -433,13 +434,6 @@ atomic_t nr_find_usage_forwards_checks;
 atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
 atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)		atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)		do { } while (0)
-# define debug_atomic_dec(ptr)		do { } while (0)
-# define debug_atomic_read(ptr)		0
 #endif
 
 /*
@@ -1900,9 +1894,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -2015,7 +2009,8 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
 			     enum lock_usage_bit bit, const char *name);
 
 static int
-mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+		enum lock_usage_bit new_bit)
 {
 	int excl_bit = exclusive_bit(new_bit);
 	int read = new_bit & 1;
@@ -2043,7 +2038,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this, int new_bit)
 	 * states.
 	 */
 	if ((!read || !dir || STRICT_READ_CHECKS) &&
-			!usage(curr, this, excl_bit, state_name(new_bit)))
+			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
 		return 0;
 
 	/*
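The state_name(new_bit & ~1) change relies on how enum lock_usage_bit is encoded: bit 0 is the read flag and bit 1 selects USED_IN vs ENABLED (which is why the function computes "new_bit & 1" above), so masking off bit 0 makes the report name the write form of the state. An illustrative sketch of that layout, mirroring the ordering generated from lockdep_states.h (an assumption, not the verbatim enum):

/* Illustrative encoding: each irq state contributes a block of four values,
 * with bit 0 = read and bit 1 = direction (USED_IN vs ENABLED). */
enum lock_usage_bit_sketch {
	LOCK_USED_IN_HARDIRQ = 0,	/* dir 0, read 0 */
	LOCK_USED_IN_HARDIRQ_READ,	/* dir 0, read 1 */
	LOCK_ENABLED_HARDIRQ,		/* dir 1, read 0 */
	LOCK_ENABLED_HARDIRQ_READ,	/* dir 1, read 1 */
	/* ...the same four entries repeat for each further state... */
};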
@@ -2929,6 +2924,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
@@ -2939,6 +2936,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2953,11 +2952,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
 			unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, nested, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3106,10 +3109,14 @@ found_it:
 	lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3125,10 +3132,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
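Taken together, the DEFINE_TRACE() instances plus the trace_lock_*() calls added above turn lockdep's hooks into tracepoints other code can attach to. A hypothetical consumer sketch, assuming the register_trace_<name>()/unregister_trace_<name>() helpers that DECLARE_TRACE() generates in this kernel generation (probe signature matches the TP_PROTO, no private-data argument):

/* Hypothetical module that logs lock contention events via the new
 * lock_contended tracepoint; names here are for illustration only. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <trace/lockdep.h>

static void probe_lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	/* Called every time lock_contended() fires the tracepoint. */
	pr_debug("contention on %s from %pS\n", lock->name, (void *)ip);
}

static int __init contention_probe_init(void)
{
	return register_trace_lock_contended(probe_lock_contended);
}

static void __exit contention_probe_exit(void)
{
	unregister_trace_lock_contended(probe_lock_contended);
}

module_init(contention_probe_init);
module_exit(contention_probe_exit);
MODULE_LICENSE("GPL");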