Diffstat (limited to 'kernel/lockdep_internals.h')
 kernel/lockdep_internals.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2ee95ad1313..4f560cfedc8f 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -110,30 +110,60 @@ lockdep_count_backward_deps(struct lock_class *class)
 #endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
+
+#include <asm/local.h>
 /*
- * Various lockdep statistics:
+ * Various lockdep statistics.
+ * We want them per cpu as they are often accessed in fast path
+ * and we want to avoid too much cache bouncing.
  */
-extern atomic_t chain_lookup_hits;
-extern atomic_t chain_lookup_misses;
-extern atomic_t hardirqs_on_events;
-extern atomic_t hardirqs_off_events;
-extern atomic_t redundant_hardirqs_on;
-extern atomic_t redundant_hardirqs_off;
-extern atomic_t softirqs_on_events;
-extern atomic_t softirqs_off_events;
-extern atomic_t redundant_softirqs_on;
-extern atomic_t redundant_softirqs_off;
-extern atomic_t nr_unused_locks;
-extern atomic_t nr_cyclic_checks;
-extern atomic_t nr_cyclic_check_recursions;
-extern atomic_t nr_find_usage_forwards_checks;
-extern atomic_t nr_find_usage_forwards_recursions;
-extern atomic_t nr_find_usage_backwards_checks;
-extern atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)		atomic_inc(ptr)
-# define debug_atomic_dec(ptr)		atomic_dec(ptr)
-# define debug_atomic_read(ptr)	atomic_read(ptr)
+struct lockdep_stats {
+	int	chain_lookup_hits;
+	int	chain_lookup_misses;
+	int	hardirqs_on_events;
+	int	hardirqs_off_events;
+	int	redundant_hardirqs_on;
+	int	redundant_hardirqs_off;
+	int	softirqs_on_events;
+	int	softirqs_off_events;
+	int	redundant_softirqs_on;
+	int	redundant_softirqs_off;
+	int	nr_unused_locks;
+	int	nr_cyclic_checks;
+	int	nr_cyclic_check_recursions;
+	int	nr_find_usage_forwards_checks;
+	int	nr_find_usage_forwards_recursions;
+	int	nr_find_usage_backwards_checks;
+	int	nr_find_usage_backwards_recursions;
+};
+
+DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+
+#define __debug_atomic_inc(ptr)				\
+	this_cpu_inc(lockdep_stats.ptr);
+
+#define debug_atomic_inc(ptr)			{	\
+	WARN_ON_ONCE(!irqs_disabled());			\
+	__this_cpu_inc(lockdep_stats.ptr);		\
+}
+
+#define debug_atomic_dec(ptr)			{	\
+	WARN_ON_ONCE(!irqs_disabled());			\
+	__this_cpu_dec(lockdep_stats.ptr);		\
+}
+
+#define debug_atomic_read(ptr)		({				\
+	struct lockdep_stats *__cpu_lockdep_stats;			\
+	unsigned long long __total = 0;					\
+	int __cpu;							\
+	for_each_possible_cpu(__cpu) {					\
+		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
+		__total += __cpu_lockdep_stats->ptr;			\
+	}								\
+	__total;							\
+})
 #else
+# define __debug_atomic_inc(ptr)	do { } while (0)
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)	0
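
A note on the pattern this hunk introduces: writers only ever touch their own CPU's lockdep_stats slot with a plain, non-atomic increment on memory no other CPU writes, while debug_atomic_read() folds every CPU's slot whenever a total is wanted. Reads are the rare, slow path (the counters are only summed for reporting), so trading an O(nr_cpus) read for a contention-free write is the point of the change. Below is a minimal user-space sketch of the same write-local/read-fold idea, with threads standing in for CPUs; all names, the thread/iteration counts, and the 64-byte cache-line guess are assumptions made for the illustration, not part of the patch.

/*
 * Per-"cpu" counter sketch: each thread owns one padded slot, so
 * increments never contend or bounce a shared cache line; a reader
 * sums every slot, mirroring debug_atomic_read()'s walk over
 * for_each_possible_cpu(). Build with: gcc -O2 -pthread
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS	4
#define ITERS		1000000

struct stat_slot {
	unsigned long long count;
	/* Assumed 64-byte cache line: pad so slots never share one. */
	char pad[64 - sizeof(unsigned long long)];
};

static struct stat_slot stats[NTHREADS];

static void *worker(void *arg)
{
	struct stat_slot *slot = arg;

	/* Local, uncontended update: the analog of __this_cpu_inc(). */
	for (int i = 0; i < ITERS; i++)
		slot->count++;
	return NULL;
}

/* Fold all slots into one total, like debug_atomic_read(). */
static unsigned long long read_total(void)
{
	unsigned long long total = 0;

	for (int i = 0; i < NTHREADS; i++)
		total += stats[i].count;
	return total;
}

int main(void)
{
	pthread_t tids[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tids[i], NULL, worker, &stats[i]);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);

	printf("total = %llu\n", read_total());
	return 0;
}

The padding is what prevents neighbouring slots from sharing a cache line, which is exactly the "cache bouncing" the patch comment refers to (the kernel's per-cpu allocator gets the same effect by construction, so the struct in the patch needs no explicit padding).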
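One further detail worth calling out: debug_atomic_inc() and debug_atomic_dec() use the __this_cpu_*() forms, which omit the preemption/irq protection that the plain this_cpu_*() operations provide. That is only sound because their call sites run with interrupts disabled, so the task cannot migrate or be interrupted mid-update; the WARN_ON_ONCE(!irqs_disabled()) both documents and polices that invariant. __debug_atomic_inc() keeps the fully safe this_cpu_inc() for paths where no such guarantee holds.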