Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	101
1 file changed, 58 insertions(+), 43 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 9af56723c096..2594e1ce41cb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,14 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+#include <linux/gfp.h>
 
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/lockdep.h>
+#include <trace/events/lock.h>
 
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
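The tracepoint header moves from <trace/events/lockdep.h> to <trace/events/lock.h>. The CREATE_TRACE_POINTS define just above the include follows the usual tracepoint convention: exactly one compilation unit defines it before including the event header, and that include then emits the real tracepoint bodies. A minimal sketch of the convention (the comments are explanatory, not part of the patch):

	/* In the one .c file that owns the events: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/lock.h>	/* emits the trace_lock_*() definitions */

	/* Every other user includes the same header without the define and
	 * only gets the lightweight trace_lock_*() inline wrappers. */
	#include <trace/events/lock.h>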
@@ -73,11 +74,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	/*
 	 * Make sure that if another CPU detected a bug while
 	 * walking the graph we dont change it (while the other
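The low-level __raw_spin_*() API used for lockdep's internal graph lock was renamed to arch_spin_*() (with raw_spinlock_t becoming arch_spinlock_t), freeing the raw_* names for the spinlock rework; this lowest-level lock still bypasses lockdep itself, which is why lockdep can take it without recursing into its own checking. A small illustration of the renamed primitive, not additional code from the patch:

	/* arch_spinlock_t is the lowest-level spinlock; it does no lockdep
	 * tracking and no debugging, so lockdep can safely use it internally. */
	static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static void demo(void)
	{
		arch_spin_lock(&demo_lock);
		/* ... data protected by demo_lock ... */
		arch_spin_unlock(&demo_lock);
	}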
@@ -85,7 +86,7 @@ static int graph_lock(void)
 	 * dropped already)
 	 */
 	if (!debug_locks) {
-		__raw_spin_unlock(&lockdep_lock);
+		arch_spin_unlock(&lockdep_lock);
 		return 0;
 	}
 	/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +96,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
 	current->lockdep_recursion--;
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	return 0;
 }
 
@@ -111,7 +112,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
 	int ret = debug_locks_off();
 
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 
 	return ret;
 }
@@ -140,7 +141,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+		      cpu_lock_stats);
 
 static inline u64 lockstat_clock(void)
 {
@@ -168,7 +170,7 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
 	if (time > lt->max)
 		lt->max = time;
 
-	if (time < lt->min || !lt->min)
+	if (time < lt->min || !lt->nr)
 		lt->min = time;
 
 	lt->total += time;
@@ -177,8 +179,15 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
 
 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 {
-	dst->min += src->min;
-	dst->max += src->max;
+	if (!src->nr)
+		return;
+
+	if (src->max > dst->max)
+		dst->max = src->max;
+
+	if (src->min < dst->min || !dst->nr)
+		dst->min = src->min;
+
 	dst->total += src->total;
 	dst->nr += src->nr;
 }
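The old lock_time_add() summed the per-CPU minima and maxima, which makes the aggregated min/max reported by the lock statistics meaningless; the new version takes the true maximum, seeds the minimum from the first non-empty source (the !dst->nr "no samples yet" test, the same test lock_time_inc() now uses instead of !lt->min), and skips sources that recorded nothing. A self-contained user-space sketch of the corrected aggregation, with an illustrative mirror of struct lock_time:

	#include <stdio.h>

	/* illustrative mirror of the kernel's struct lock_time */
	struct lock_time {
		unsigned long long min, max, total;
		unsigned long nr;
	};

	static void lock_time_add(const struct lock_time *src, struct lock_time *dst)
	{
		if (!src->nr)				/* nothing measured here */
			return;
		if (src->max > dst->max)		/* true maximum, not a sum */
			dst->max = src->max;
		if (src->min < dst->min || !dst->nr)	/* seed min from first sample */
			dst->min = src->min;
		dst->total += src->total;
		dst->nr += src->nr;
	}

	int main(void)
	{
		struct lock_time per_cpu[2] = { { 3, 9, 12, 2 }, { 0, 0, 0, 0 } };
		struct lock_time sum = { 0, 0, 0, 0 };
		int i;

		for (i = 0; i < 2; i++)
			lock_time_add(&per_cpu[i], &sum);
		/* prints: min=3 max=9 total=12 nr=2 */
		printf("min=%llu max=%llu total=%llu nr=%lu\n",
		       sum.min, sum.max, sum.total, sum.nr);
		return 0;
	}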
@@ -191,7 +200,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 	memset(&stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 			stats.contention_point[i] += pcs->contention_point[i];
@@ -218,7 +227,7 @@ void clear_lock_stats(struct lock_class *class)
 
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *cpu_stats =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 	}
@@ -228,12 +237,12 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-	return &get_cpu_var(lock_stats)[class - lock_classes];
+	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-	put_cpu_var(lock_stats);
+	put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
@@ -379,7 +388,8 @@ static int save_trace(struct stack_trace *trace)
 	 * complete trace that maxes out the entries provided will be reported
 	 * as incomplete, friggin useless </rant>
 	 */
-	if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+	if (trace->nr_entries != 0 &&
+	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
 		trace->nr_entries--;
 
 	trace->max_entries = trace->nr_entries;
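The added trace->nr_entries != 0 test guards the peek at entries[nr_entries-1]: nr_entries is unsigned, so with an empty trace the old code read entries[0xffffffff], a wild out-of-bounds access. A tiny user-space illustration of that wrap:

	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_entries = 0;

		/* the old index expression evaluated with an empty trace */
		printf("%u\n", nr_entries - 1);	/* prints 4294967295 */
		return 0;
	}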
@@ -573,9 +583,6 @@ static int static_obj(void *obj)
 	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;
-#ifdef CONFIG_SMP
-	int i;
-#endif
 
 	/*
 	 * static variable?
@@ -586,24 +593,16 @@ static int static_obj(void *obj)
 	if (arch_is_kernel_data(addr))
 		return 1;
 
-#ifdef CONFIG_SMP
 	/*
-	 * percpu var?
+	 * in-kernel percpu var?
 	 */
-	for_each_possible_cpu(i) {
-		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
-					+ per_cpu_offset(i);
-
-		if ((addr >= start) && (addr < end))
-			return 1;
-	}
-#endif
+	if (is_kernel_percpu_address(addr))
+		return 1;
 
 	/*
-	 * module var?
+	 * module static or percpu var?
 	 */
-	return is_module_address(addr);
+	return is_module_address(addr) || is_module_percpu_address(addr);
 }
 
 /*
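static_obj() no longer open-codes the per-CPU address test against __per_cpu_start/PERCPU_ENOUGH_ROOM; it asks is_kernel_percpu_address() for core-kernel per-CPU variables and is_module_percpu_address() for per-CPU variables belonging to modules, which the old range check could not see because module per-CPU data is carved out of dynamically allocated per-CPU chunks. A hedged sketch of the per-CPU/module part of that decision, with an illustrative function name:

	/* Roughly the question static_obj() answers for lockdep keys: is this
	 * object statically allocated (kernel image, per-CPU area, or module)
	 * rather than on the heap or the stack? */
	static int address_is_static(unsigned long addr)
	{
		if (is_kernel_percpu_address(addr))	/* core-kernel per-CPU var */
			return 1;
		if (is_module_percpu_address(addr))	/* module per-CPU var */
			return 1;
		return is_module_address(addr);		/* module .data/.bss */
	}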
@@ -1161,9 +1160,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -1188,9 +1187,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -2138,7 +2137,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
-					this, 1, irqclass);
+					this, 0, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
@@ -3202,8 +3201,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3211,6 +3208,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0);
 	current->lockdep_recursion = 0;
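Here, and in the lock_release() and lock_contended() hunks below, the tracepoint moves from before the recursion check to after current->lockdep_recursion is set: recursive entries into lockdep no longer emit events, and anything the tracing code does under the tracepoint (including taking locks of its own) bounces off the recursion guard instead of re-entering the dependency checker. A stand-alone sketch of that re-entrancy guard pattern, with made-up names:

	#include <stdio.h>

	static int in_lockdep;	/* stands in for current->lockdep_recursion */

	static void trace_event(const char *what)
	{
		/* if this path took a lock, the nested lock_acquire_sketch()
		 * call would hit the guard below and return immediately */
		printf("trace: %s\n", what);
	}

	static void lock_acquire_sketch(const char *name)
	{
		if (in_lockdep)
			return;			/* recursive entry: do nothing */

		in_lockdep = 1;			/* block re-entry ... */
		trace_event(name);		/* ... so the tracepoint is covered too */
		/* real dependency checking would happen here */
		in_lockdep = 0;
	}

	int main(void)
	{
		lock_acquire_sketch("demo_lock");
		return 0;
	}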
@@ -3223,14 +3221,13 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
 	unsigned long flags;
 
-	trace_lock_release(lock, nested, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_release(lock, nested, ip);
 	__lock_release(lock, nested, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
@@ -3404,8 +3401,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_contended(lock, ip);
-
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3415,6 +3410,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
@@ -3800,3 +3796,22 @@ void lockdep_sys_exit(void)
 		lockdep_print_held_locks(curr);
 	}
 }
+
+void lockdep_rcu_dereference(const char *file, const int line)
+{
+	struct task_struct *curr = current;
+
+	if (!debug_locks_off())
+		return;
+	printk("\n===================================================\n");
+	printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
+	printk(  "---------------------------------------------------\n");
+	printk("%s:%d invoked rcu_dereference_check() without protection!\n",
+			file, line);
+	printk("\nother info that might help us debug this:\n\n");
+	printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
+	lockdep_print_held_locks(curr);
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
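The new lockdep_rcu_dereference() is the reporting half of rcu_dereference_check(): when the caller's protection condition evaluates false while lockdep is live, the macro calls this function with __FILE__ and __LINE__, and it prints the "suspicious rcu_dereference_check() usage" banner, the locks currently held, and a stack backtrace, after turning debug_locks off so the warning fires only once. A hedged sketch of the caller side — foo_ptr, foo_lock and struct foo are made-up names:

	/* The dereference is considered safe inside an RCU read-side critical
	 * section or with foo_lock held; any other caller makes
	 * rcu_dereference_check() invoke lockdep_rcu_dereference() and print
	 * the splat added above. */
	struct foo *get_foo(void)
	{
		return rcu_dereference_check(foo_ptr,
					     rcu_read_lock_held() ||
					     lockdep_is_held(&foo_lock));
	}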