Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c  82
1 file changed, 43 insertions(+), 39 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index dbda475b13bd..06b0c3568f0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
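The new DISABLE_BRANCH_PROFILING define opts lockdep.c out of the likely()/unlikely() branch profiler; the profiler's bookkeeping can itself take locks and call back into lockdep, so instrumenting this file would recurse. A rough sketch of the assumed opt-out mechanism in include/linux/compiler.h follows; the "profiled_*" wrappers are placeholders, not the real names.

/*
 * Illustration only (assumed shape of include/linux/compiler.h).
 * The define must appear before the first #include so this file sees
 * the plain __builtin_expect() forms instead of the profiled wrappers.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
# define likely(x)	profiled_likely(x)	/* placeholder for the instrumented form */
# define unlikely(x)	profiled_unlikely(x)	/* placeholder for the instrumented form */
#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif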
@@ -136,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-		if (class->contention_point[i] == 0) {
-			class->contention_point[i] = ip;
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
+		if (points[i] == 0) {
+			points[i] = ip;
 			break;
 		}
-		if (class->contention_point[i] == ip)
+		if (points[i] == ip)
 			break;
 	}
 
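lock_point() generalizes the old lock_contention_point(): rather than being hard-wired to class->contention_point, it records an instruction pointer into whichever fixed-size table it is handed, so the same helper serves both the existing contention_point[] array and the new contending_point[] array used below. A sketch of the data-structure side this relies on, presumably added by the companion include/linux/lockdep.h change (field names as used here, LOCKSTAT_POINTS value assumed):

/* Assumed companion definitions -- not part of this patch. */
#define LOCKSTAT_POINTS		4

struct lock_class_stats_sketch {
	/* call sites at which tasks had to wait for this class */
	unsigned long	contention_point[LOCKSTAT_POINTS];
	/* call sites at which the blocking owner had acquired the lock (new) */
	unsigned long	contending_point[LOCKSTAT_POINTS];
	/* ... wait/hold time and bounce statistics elided ... */
};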
@@ -185,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 			stats.contention_point[i] += pcs->contention_point[i];
 
+		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+			stats.contending_point[i] += pcs->contending_point[i];
+
 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -209,6 +213,7 @@ void clear_lock_stats(struct lock_class *class)
 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 	}
 	memset(class->contention_point, 0, sizeof(class->contention_point));
+	memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -287,14 +292,12 @@ void lockdep_off(void)
 {
 	current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
 	current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -576,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
@@ -2169,12 +2173,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2191,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
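With the parameter renamed from a0 to ip and the local __builtin_return_address(0) lookup removed, trace_hardirqs_on_caller() simply trusts the call-site address its caller passes in, so the irqsoff tracer can supply a precise address instead of having lockdep recompute one. The plain trace_hardirqs_on() entry point presumably remains a thin wrapper along these lines (a sketch, not quoted from the patch):

void trace_hardirqs_on(void)
{
	/* CALLER_ADDR0: the address this function was called from */
	trace_hardirqs_on_caller(CALLER_ADDR0);
}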
@@ -2224,11 +2226,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2243,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = _RET_IP_;
+		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
@@ -2510,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2691,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 }
 
 static int
-__lock_set_subclass(struct lockdep_map *lock,
-		    unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+		 struct lock_class_key *key, unsigned int subclass,
+		 unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -2719,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
 		return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
 	hlock->class_idx = class - lock_classes + 1;
 
@@ -2903,9 +2906,9 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
-void
-lock_set_subclass(struct lockdep_map *lock,
-		  unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+		    struct lock_class_key *key, unsigned int subclass,
+		    unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2915,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
 	raw_local_irq_save(flags);
 	current->lockdep_recursion = 1;
 	check_flags(flags);
-	if (__lock_set_subclass(lock, subclass, ip))
+	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
 
 /*
  * We are not always called with irqs disabled - do that here,
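__lock_set_subclass() becomes __lock_set_class(): a held lock can now be re-annotated with an entirely different name and key, not just a different subclass, and lockdep_init_map() is re-run at found_it: so the lockdep_map itself is switched over before the new class is registered. Existing lock_set_subclass() users can presumably be kept working through a small wrapper in include/linux/lockdep.h along these lines (a sketch under that assumption):

/* Assumed compatibility wrapper for existing lock_set_subclass() callers. */
static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	/* keep the current class identity, change only the subclass */
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}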
@@ -2945,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, int nested,
@@ -2963,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
 #ifdef CONFIG_LOCK_STAT
@@ -3001,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	int i, point;
+	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
 	if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3025,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock_class(hlock), ip);
+	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+	contending_point = lock_point(hlock_class(hlock)->contending_point,
+				      lock->ip);
 
 	stats = get_lock_stats(hlock_class(hlock));
-	if (point < ARRAY_SIZE(stats->contention_point))
-		stats->contention_point[point]++;
+	if (contention_point < LOCKSTAT_POINTS)
+		stats->contention_point[contention_point]++;
+	if (contending_point < LOCKSTAT_POINTS)
+		stats->contending_point[contending_point]++;
 	if (lock->cpu != smp_processor_id())
 		stats->bounces[bounce_contended + !!hlock->read]++;
 	put_lock_stats(stats);
 }
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -3085,6 +3089,7 @@ found_it:
 	put_lock_stats(stats);
 
 	lock->cpu = cpu;
+	lock->ip = ip;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3106,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -3119,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
-	__lock_acquired(lock);
+	__lock_acquired(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
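The ip plumbing ties the two statistics together: lock_acquired() now records where the lock was actually taken (stored in lock->ip alongside lock->cpu), and a later lock_contended() reads that address back as the "contending point", i.e. the acquisition site of the owner that made the current task wait, while the waiter's own call site is still counted as the "contention point". Lock implementations are presumably expected to thread their return address through both calls, roughly in the style of the LOCK_CONTENDED() helper; the macro below is a renamed sketch of that pattern, with try()/lock() standing in for a real trylock/slowpath pair:

#define MYLOCK_CONTENDED(_lock, try, lock)				\
do {									\
	if (!try(_lock)) {						\
		/* about to block: record where we are waiting */	\
		lock_contended(&(_lock)->dep_map, _RET_IP_);		\
		lock(_lock);						\
	}								\
	/* lock is ours: record where it was acquired */		\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
} while (0)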
@@ -3278,10 +3283,10 @@ void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
@@ -3417,9 +3422,10 @@ retry:
 		}
 		printk(" ignoring it.\n");
 		unlock = 0;
+	} else {
+		if (count != 10)
+			printk(KERN_CONT " locked it.\n");
 	}
-	if (count != 10)
-		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
@@ -3442,7 +3448,6 @@ retry:
 	if (unlock)
 		read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
 	__debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)