Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 47 ++++++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f5dcd36d315..5feaddcdbe4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-        __raw_spin_lock(&lockdep_lock);
+        arch_spin_lock(&lockdep_lock);
         /*
          * Make sure that if another CPU detected a bug while
          * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
          * dropped already)
          */
         if (!debug_locks) {
-                __raw_spin_unlock(&lockdep_lock);
+                arch_spin_unlock(&lockdep_lock);
                 return 0;
         }
         /* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+        if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
                 return DEBUG_LOCKS_WARN_ON(1);
 
         current->lockdep_recursion--;
-        __raw_spin_unlock(&lockdep_lock);
+        arch_spin_unlock(&lockdep_lock);
         return 0;
 }
 
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
         int ret = debug_locks_off();
 
-        __raw_spin_unlock(&lockdep_lock);
+        arch_spin_unlock(&lockdep_lock);
 
         return ret;
 }
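
The four hunks above are one mechanical rename. lockdep's internal graph lock must be the lowest-level spinlock primitive in the kernel, so that taking it can never recurse back into the lockdep code (as the comment at line 73 says); with the spinlock-layer rework that repurposes the raw_spinlock_t name for annotated raw locks, that lowest level is now spelled arch_spinlock_t, with arch_spin_lock()/arch_spin_unlock()/arch_spin_is_locked(). A minimal usage sketch follows, with a hypothetical demo_lock that is not part of the patch. Note that arch_spin_lock() does no irq or preemption handling of its own, so callers must disable interrupts themselves, exactly as lockdep_count_forward_deps() does in the hunks at the bottom of this diff.

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* arch_spin_lock() leaves irqs alone */
        arch_spin_lock(&demo_lock);
        /* ... touch data that lockdep itself must never instrument ... */
        arch_spin_unlock(&demo_lock);
        local_irq_restore(flags);
}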
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                      cpu_lock_stats);
 
 static inline u64 lockstat_clock(void)
 {
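
The per-cpu statistics array gains a cpu_ prefix because this file also defines a global function named lock_stats() (visible in the hunk at old line 191 below); per-cpu symbols were being made unique across the tree at the time, in preparation for per-cpu variables sharing the ordinary global symbol namespace, and the two names would then collide. A sketch of the clash this rename avoids, quoting the two declarations from this file:

static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
struct lock_class_stats lock_stats(struct lock_class *class);  /* same name */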
@@ -168,7 +169,7 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
         if (time > lt->max)
                 lt->max = time;
 
-        if (time < lt->min || !lt->min)
+        if (time < lt->min || !lt->nr)
                 lt->min = time;
 
         lt->total += time;
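
This small change fixes a real accounting bug. lt->min starts out zero, so time < lt->min can never seed the first minimum; the old code used !lt->min as the "no minimum recorded yet" test. But a genuine hold time of 0 makes !lt->min true forever afterwards, so every later sample overwrites the minimum. Testing !lt->nr, the sample count, is the correct "first sample" condition. A worked sequence:

/*
 * Samples 5, 0, 9 arriving in that order:
 *   old test (!lt->min):  min becomes 5, then 0, then 9   -- 9 is wrong
 *   new test (!lt->nr):   min becomes 5, then 0, stays 0  -- correct
 */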
@@ -177,8 +178,15 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
 
 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 {
-        dst->min += src->min;
-        dst->max += src->max;
+        if (!src->nr)
+                return;
+
+        if (src->max > dst->max)
+                dst->max = src->max;
+
+        if (src->min < dst->min || !dst->nr)
+                dst->min = src->min;
+
         dst->total += src->total;
         dst->nr += src->nr;
 }
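
lock_time_add() folds one CPU's lock_time record into a running aggregate (see lock_stats() in the next hunk). Adding the per-cpu minima and maxima, as the old code did, produces meaningless numbers: the aggregate minimum is the smallest per-cpu minimum, and the aggregate maximum the largest per-cpu maximum. The rewrite also returns early for an empty source (src->nr == 0), so a CPU that never touched the lock cannot inject its all-zero record, and uses !dst->nr so the first non-empty source seeds dst->min. For example:

/*
 * Merging two CPUs with (min, max) = (2, 7) and (3, 9):
 *   old: dst->min = 2 + 3 = 5,  dst->max = 7 + 9 = 16   -- nonsense
 *   new: dst->min = min(2, 3) = 2,  dst->max = max(7, 9) = 9
 */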
@@ -191,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
         memset(&stats, 0, sizeof(struct lock_class_stats));
         for_each_possible_cpu(cpu) {
                 struct lock_class_stats *pcs =
-                        &per_cpu(lock_stats, cpu)[class - lock_classes];
+                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                         stats.contention_point[i] += pcs->contention_point[i];
@@ -218,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
 
         for_each_possible_cpu(cpu) {
                 struct lock_class_stats *cpu_stats =
-                        &per_cpu(lock_stats, cpu)[class - lock_classes];
+                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
         }
@@ -228,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-        return &get_cpu_var(lock_stats)[class - lock_classes];
+        return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-        put_cpu_var(lock_stats);
+        put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
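
The three hunks above complete the lock_stats -> cpu_lock_stats rename at every access site. Worth noting: get_cpu_var() disables preemption and put_cpu_var() re-enables it, so everything between get_lock_stats() and put_lock_stats() runs pinned to one CPU's slot of the array. A sketch of the consumer pattern, modeled on lock_release_holdtime() just below (field names as this file defines them, with the config checks and early returns elided):

static void lock_release_holdtime(struct held_lock *hlock)
{
        struct lock_class_stats *stats;
        u64 holdtime = lockstat_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));   /* preemption now off */
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
        put_lock_stats(stats);                        /* preemption back on */
}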
@@ -379,7 +387,8 @@ static int save_trace(struct stack_trace *trace)
          * complete trace that maxes out the entries provided will be reported
          * as incomplete, friggin useless </rant>
          */
-        if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+        if (trace->nr_entries != 0 &&
+            trace->entries[trace->nr_entries-1] == ULONG_MAX)
                 trace->nr_entries--;
 
         trace->max_entries = trace->nr_entries;
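
save_stack_trace() can legitimately store zero entries. The old code then evaluated trace->entries[trace->nr_entries - 1] with nr_entries == 0; nr_entries is an unsigned int, so the index wraps and the ULONG_MAX end-marker test reads far out of bounds. The added nr_entries != 0 check short-circuits before the array access. In miniature:

/*
 * With trace->nr_entries == 0:
 *   trace->entries[trace->nr_entries - 1]
 *     == trace->entries[(unsigned int)-1]
 *     == trace->entries[4294967295]        -- out-of-bounds read
 */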
@@ -1161,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
         this.class = class;
 
         local_irq_save(flags);
-        __raw_spin_lock(&lockdep_lock);
+        arch_spin_lock(&lockdep_lock);
         ret = __lockdep_count_forward_deps(&this);
-        __raw_spin_unlock(&lockdep_lock);
+        arch_spin_unlock(&lockdep_lock);
         local_irq_restore(flags);
 
         return ret;
@@ -1188,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
         this.class = class;
 
         local_irq_save(flags);
-        __raw_spin_lock(&lockdep_lock);
+        arch_spin_lock(&lockdep_lock);
         ret = __lockdep_count_backward_deps(&this);
-        __raw_spin_unlock(&lockdep_lock);
+        arch_spin_unlock(&lockdep_lock);
         local_irq_restore(flags);
 
         return ret;
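
These last two hunks convert the forward and backward dependency counters symmetrically, and they illustrate the contract noted in the sketch earlier: local_irq_save() brackets arch_spin_lock() by hand, because the arch-level lock neither disables interrupts nor participates in lockdep's own tracking.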