Diffstat (limited to 'kernel/lockdep_internals.h')

 kernel/lockdep_internals.h | 74
 1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2ee95ad1313..8d7d4b6c741a 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -110,29 +110,61 @@ lockdep_count_backward_deps(struct lock_class *class)
 #endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
+
+#include <asm/local.h>
 /*
- * Various lockdep statistics:
+ * Various lockdep statistics.
+ * We want them per cpu as they are often accessed in fast path
+ * and we want to avoid too much cache bouncing.
  */
-extern atomic_t chain_lookup_hits;
-extern atomic_t chain_lookup_misses;
-extern atomic_t hardirqs_on_events;
-extern atomic_t hardirqs_off_events;
-extern atomic_t redundant_hardirqs_on;
-extern atomic_t redundant_hardirqs_off;
-extern atomic_t softirqs_on_events;
-extern atomic_t softirqs_off_events;
-extern atomic_t redundant_softirqs_on;
-extern atomic_t redundant_softirqs_off;
-extern atomic_t nr_unused_locks;
-extern atomic_t nr_cyclic_checks;
-extern atomic_t nr_cyclic_check_recursions;
-extern atomic_t nr_find_usage_forwards_checks;
-extern atomic_t nr_find_usage_forwards_recursions;
-extern atomic_t nr_find_usage_backwards_checks;
-extern atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)	atomic_read(ptr)
+struct lockdep_stats {
+	int	chain_lookup_hits;
+	int	chain_lookup_misses;
+	int	hardirqs_on_events;
+	int	hardirqs_off_events;
+	int	redundant_hardirqs_on;
+	int	redundant_hardirqs_off;
+	int	softirqs_on_events;
+	int	softirqs_off_events;
+	int	redundant_softirqs_on;
+	int	redundant_softirqs_off;
+	int	nr_unused_locks;
+	int	nr_cyclic_checks;
+	int	nr_cyclic_check_recursions;
+	int	nr_find_usage_forwards_checks;
+	int	nr_find_usage_forwards_recursions;
+	int	nr_find_usage_backwards_checks;
+	int	nr_find_usage_backwards_recursions;
+};
+
+DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+
+#define debug_atomic_inc(ptr)	{				\
+	struct lockdep_stats *__cpu_lockdep_stats;		\
+								\
+	WARN_ON_ONCE(!irqs_disabled());				\
+	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
+	__cpu_lockdep_stats->ptr++;				\
+}
+
+#define debug_atomic_dec(ptr)	{				\
+	struct lockdep_stats *__cpu_lockdep_stats;		\
+								\
+	WARN_ON_ONCE(!irqs_disabled());				\
+	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
+	__cpu_lockdep_stats->ptr--;				\
+}
+
+#define debug_atomic_read(ptr)	({					\
+	struct lockdep_stats *__cpu_lockdep_stats;			\
+	unsigned long long __total = 0;					\
+	int __cpu;							\
+									\
+	for_each_possible_cpu(__cpu) {					\
+		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
+		__total += __cpu_lockdep_stats->ptr;			\
+	}								\
+	__total;							\
+})
 #else
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
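
Note the call-site convention this change implies: debug_atomic_inc() and friends now take the name of a lockdep_stats field rather than a pointer to an atomic_t, because the macro body token-pastes its argument as __cpu_lockdep_stats->ptr. A hypothetical call site, for illustration only (the real callers live elsewhere in the lockdep code):

	/* bumps this CPU's copy of the counter; IRQs must be disabled,
	 * as the WARN_ON_ONCE() in the macro body checks */
	debug_atomic_inc(chain_lookup_hits);

whereas the old atomic_t variant would have been invoked as debug_atomic_inc(&chain_lookup_hits).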
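
The pattern itself generalizes beyond lockdep: writers bump a CPU-local slot with a plain, non-atomic increment, and readers sum every slot, accepting a momentarily inconsistent total in exchange for a contention-free fast path. Below is a minimal user-space sketch of that trade-off, assuming one worker thread standing in for each CPU; the names (per_cpu_stats, read_total) and the pthread scaffolding are illustrative, not part of the patch. In the kernel the writer side additionally requires IRQs off so the task cannot migrate mid-update; here a fixed per-thread index plays that role.

	#include <pthread.h>
	#include <stdio.h>

	#define N_CPUS	4
	#define N_OPS	1000000

	/* One counter slot per "CPU", padded to its own cache line so
	 * concurrent writers never bounce a shared line between cores --
	 * the same motivation the patch comment gives for per-cpu stats. */
	struct stats {
		long	chain_lookup_hits;
		char	pad[64 - sizeof(long)];
	};

	static struct stats per_cpu_stats[N_CPUS];

	/* Writer fast path: plain increment of the local slot, no atomics. */
	static void *worker(void *arg)
	{
		long cpu = (long)arg;

		for (int i = 0; i < N_OPS; i++)
			per_cpu_stats[cpu].chain_lookup_hits++;
		return NULL;
	}

	/* Reader slow path: sum all slots, like debug_atomic_read() above. */
	static long read_total(void)
	{
		long total = 0;

		for (int cpu = 0; cpu < N_CPUS; cpu++)
			total += per_cpu_stats[cpu].chain_lookup_hits;
		return total;
	}

	int main(void)
	{
		pthread_t tids[N_CPUS];

		for (long cpu = 0; cpu < N_CPUS; cpu++)
			pthread_create(&tids[cpu], NULL, worker, (void *)cpu);
		for (int cpu = 0; cpu < N_CPUS; cpu++)
			pthread_join(tids[cpu], NULL);

		printf("chain_lookup_hits = %ld\n", read_total()); /* 4000000 */
		return 0;
	}

A read taken while the workers are still running may lag the true count, exactly as debug_atomic_read() can, which is acceptable for statistics that are only reported, never acted upon.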