Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8631320a50d0..429540c70d3f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -49,7 +49,7 @@
 #include "lockdep_internals.h"
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/lockdep.h>
+#include <trace/events/lock.h>
 
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
@@ -143,6 +143,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
                       cpu_lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+        return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
         int i;
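The hunk above is the core of the change: all lock-statistics timestamps now go through a single lockstat_clock() helper built on cpu_clock(smp_processor_id()) instead of calling sched_clock() directly at each site. As a rough illustration only, the minimal userspace sketch below shows the same pattern of hiding the time base behind one helper so it can be swapped in a single place; clock_gettime(CLOCK_MONOTONIC) merely stands in for the kernel's cpu_clock() and is not part of this patch.

/*
 * Userspace sketch (not kernel code): one helper supplies every
 * lock-statistics timestamp, so changing the clock source later only
 * touches this function.  clock_gettime(CLOCK_MONOTONIC) stands in
 * for cpu_clock(smp_processor_id()).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static inline uint64_t lockstat_clock(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        uint64_t holdtime_stamp = lockstat_clock();
        /* ... a critical section would run here ... */
        uint64_t holdtime = lockstat_clock() - holdtime_stamp;

        printf("held for %llu ns\n", (unsigned long long)holdtime);
        return 0;
}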
@@ -159,12 +164,12 @@ static int lock_point(unsigned long points[], unsigned long ip)
         return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
         if (time > lt->max)
                 lt->max = time;
 
-        if (time < lt->min || !lt->min)
+        if (time < lt->min || !lt->nr)
                 lt->min = time;
 
         lt->total += time;
@@ -173,8 +178,15 @@ static void lock_time_inc(struct lock_time *lt, s64 time)
 
 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 {
-        dst->min += src->min;
-        dst->max += src->max;
+        if (!src->nr)
+                return;
+
+        if (src->max > dst->max)
+                dst->max = src->max;
+
+        if (src->min < dst->min || !dst->nr)
+                dst->min = src->min;
+
         dst->total += src->total;
         dst->nr += src->nr;
 }
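The lock_time_add() hunk above fixes the merge semantics: when per-CPU lock_time records are folded together, min and max are now combined with min()/max() (skipping records with nr == 0) rather than summed. The standalone sketch below, using a simplified stand-in for struct lock_time, is only meant to illustrate why that matters; it is not the kernel structure itself.

/*
 * Sketch of the corrected merge: min/max are combined, not added, and
 * empty records (nr == 0) are skipped so their zero fields don't win.
 */
#include <stdint.h>
#include <stdio.h>

struct lock_time {
        uint64_t min;
        uint64_t max;
        uint64_t total;
        unsigned long nr;
};

static void lock_time_add(const struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)
                return;

        if (src->max > dst->max)
                dst->max = src->max;

        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;

        dst->total += src->total;
        dst->nr += src->nr;
}

int main(void)
{
        struct lock_time cpu0 = { .min = 10, .max = 50, .total = 60, .nr = 2 };
        struct lock_time cpu1 = { .min = 5,  .max = 80, .total = 85, .nr = 2 };
        struct lock_time sum  = { 0 };

        lock_time_add(&cpu0, &sum);
        lock_time_add(&cpu1, &sum);

        /* Expect min=5 max=80 total=145 nr=4, not min=15 / max=130. */
        printf("min=%llu max=%llu total=%llu nr=%lu\n",
               (unsigned long long)sum.min, (unsigned long long)sum.max,
               (unsigned long long)sum.total, sum.nr);
        return 0;
}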
@@ -235,12 +247,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
         struct lock_class_stats *stats;
-        s64 holdtime;
+        u64 holdtime;
 
         if (!lock_stat)
                 return;
 
-        holdtime = sched_clock() - hlock->holdtime_stamp;
+        holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
         stats = get_lock_stats(hlock_class(hlock));
         if (hlock->read)
@@ -375,7 +387,8 @@ static int save_trace(struct stack_trace *trace)
          * complete trace that maxes out the entries provided will be reported
          * as incomplete, friggin useless </rant>
          */
-        if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+        if (trace->nr_entries != 0 &&
+            trace->entries[trace->nr_entries-1] == ULONG_MAX)
                 trace->nr_entries--;
 
         trace->max_entries = trace->nr_entries;
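The save_trace() hunk above adds an emptiness check before peeking at the last saved entry, so an empty trace no longer indexes entries[nr_entries - 1] out of bounds. The short sketch below restates that guard with a simplified, hypothetical stack_trace struct and helper name purely for illustration.

/*
 * Sketch of the guard: never look at the last entry of a trace that
 * has no entries, otherwise entries[nr_entries - 1] reads before (or,
 * with an unsigned index, far beyond) the array.
 */
#include <limits.h>
#include <stdio.h>

#define MAX_ENTRIES 8

struct stack_trace {
        unsigned int nr_entries;
        unsigned long entries[MAX_ENTRIES];
};

static void trim_terminator(struct stack_trace *trace)
{
        if (trace->nr_entries != 0 &&
            trace->entries[trace->nr_entries - 1] == ULONG_MAX)
                trace->nr_entries--;
}

int main(void)
{
        struct stack_trace empty = { .nr_entries = 0 };
        struct stack_trace full = { .nr_entries = 3,
                                    .entries = { 0x1000, 0x2000, ULONG_MAX } };

        trim_terminator(&empty);        /* safe: no out-of-bounds read */
        trim_terminator(&full);         /* drops the ULONG_MAX terminator */

        printf("empty=%u full=%u\n", empty.nr_entries, full.nr_entries);
        return 0;
}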
@@ -2793,7 +2806,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
         hlock->waittime_stamp = 0;
-        hlock->holdtime_stamp = sched_clock();
+        hlock->holdtime_stamp = lockstat_clock();
 #endif
 
         if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3323,7 +3336,7 @@ found_it:
         if (hlock->instance != lock)
                 return;
 
-        hlock->waittime_stamp = sched_clock();
+        hlock->waittime_stamp = lockstat_clock();
 
         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
         contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3346,8 +3359,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
         struct held_lock *hlock, *prev_hlock;
         struct lock_class_stats *stats;
         unsigned int depth;
-        u64 now;
-        s64 waittime = 0;
+        u64 now, waittime = 0;
         int i, cpu;
 
         depth = curr->lockdep_depth;
@@ -3375,7 +3387,7 @@ found_it:
 
         cpu = smp_processor_id();
         if (hlock->waittime_stamp) {
-                now = sched_clock();
+                now = lockstat_clock();
                 waittime = now - hlock->waittime_stamp;
                 hlock->holdtime_stamp = now;
         }