author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-20 13:15:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-02 09:41:35 -0400
commit	bb97a91e2549a7f2df9c21d32542582f549ab3ec (patch)
tree	fdddcb458b74a39e97089e5bcc93234cb50704c6
parent	f607c6685774811b8112e124f10a053d77015485 (diff)
lockdep: Deal with many similar locks
spin_lock_nest_lock() allows taking many instances of the same lock
class; this can easily overflow MAX_LOCK_DEPTH. To avoid that
overflow, we stop accounting individual instances and instead
reference count the class in the held_lock structure.

[ We could maintain a list of instances, if we moved the hlock
  stuff into __lock_acquired(), but that would require significant
  modifications to the current code. ]

We restrict this mode to spin_lock_nest_lock() only, because it
degrades lockdep's quality due to the loss of instance information.
For lockstat this means we don't track lock statistics for any but
the first lock in the series.

Currently the nesting count is limited to 11 bits because that was
the spare space available in held_lock; this yields a maximum of
2048 instances.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
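[ Editor's illustration, not part of the commit: the core of the scheme is that when the held_lock on top of the stack has the same class as the incoming lock and a nest_lock was supplied, lockdep bumps a reference count instead of pushing a new entry, and release only pops the entry once that count drains. Below is a minimal userspace sketch of that bookkeeping; struct held_lock is pared down to the relevant fields, and acquire_nested()/release_one() are hypothetical stand-ins for the paths this patch adds to __lock_acquire() and lock_release_non_nested(). ]

#include <assert.h>
#include <stdio.h>

/* Pared-down stand-in for struct held_lock: only the fields the
 * reference-counting scheme touches. */
struct held_lock {
	unsigned int class_idx;
	unsigned int references:11;	/* 0 = plain entry, else instance count */
};

/* Mirrors the fast path added to __lock_acquire(): same class on top
 * of the stack plus a nest_lock means we count instead of pushing. */
static int acquire_nested(struct held_lock *top, unsigned int class_idx,
			  int have_nest_lock)
{
	if (top && top->class_idx == class_idx && have_nest_lock) {
		if (top->references)
			top->references++;	/* already counting */
		else
			top->references = 2;	/* existing entry + this one */
		return 1;			/* stack left unchanged */
	}
	return 0;	/* caller would push a fresh held_lock instead */
}

/* Mirrors the release path: while references remain after dropping
 * one, the current lock stack is still valid and nothing is popped. */
static int release_one(struct held_lock *top)
{
	if (top->references) {
		top->references--;
		if (top->references)
			return 1;		/* still held, we're done */
	}
	return 0;	/* caller removes the entry from the stack */
}

int main(void)
{
	struct held_lock top = { .class_idx = 7, .references = 0 };

	assert(acquire_nested(&top, 7, 1));	/* 2nd instance: refs = 2 */
	assert(acquire_nested(&top, 7, 1));	/* 3rd instance: refs = 3 */
	assert(release_one(&top));		/* refs = 2 */
	assert(release_one(&top));		/* refs = 1 */
	assert(!release_one(&top));		/* refs = 0: pop the entry */
	printf("references:11 spans %u values, hence the ~2048 ceiling\n",
	       1u << 11);
	return 0;
}

[ Note that setting the count to 2 on the first nested hit deliberately accounts for both the entry already on the stack and the new instance, which is why the entry is popped only once the count drains to zero. ]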
-rw-r--r--	include/linux/lockdep.h	 4
-rw-r--r--	kernel/lockdep.c	89
2 files changed, 80 insertions(+), 13 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a6d5e5e4d084..47d42eff6124 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -213,10 +213,12 @@ struct held_lock {
 	 * interrupt context:
 	 */
 	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
-	unsigned int trylock:1;
+	unsigned int trylock:1;						/* 16 bits */
+
 	unsigned int read:2;        /* see lock_acquire() comment */
 	unsigned int check:2;       /* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
+	unsigned int references:11;					/* 32 bits */
 };
 
 /*
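[ Editor's side note, not part of the commit: the /* 16 bits */ and /* 32 bits */ markers track the running width of the bitfields so the group still packs into one 32-bit word. Assuming the 13-bit class_idx field (MAX_LOCKDEP_KEYS_BITS) that held_lock carried in this era, the arithmetic checks out, as this throwaway program shows. ]

#include <assert.h>

int main(void)
{
	/* Bitfield widths from struct held_lock; the class_idx width is
	 * the assumed MAX_LOCKDEP_KEYS_BITS value (13) of this period. */
	int class_idx = 13, irq_context = 2, trylock = 1;
	int read = 2, check = 2, hardirqs_off = 1, references = 11;

	assert(class_idx + irq_context + trylock == 16);	/* 16 bits */
	assert(class_idx + irq_context + trylock + read + check +
	       hardirqs_off + references == 32);		/* 32 bits */
	return 0;
}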
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 28914a509914..0bb246e21cd7 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2708,13 +2708,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
-			  struct lockdep_map *nest_lock, unsigned long ip)
+			  struct lockdep_map *nest_lock, unsigned long ip,
+			  int references)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
 	struct held_lock *hlock;
 	unsigned int depth, id;
 	int chain_head = 0;
+	int class_idx;
 	u64 chain_key;
 
 	if (!prove_locking)
@@ -2762,10 +2764,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
+	class_idx = class - lock_classes + 1;
+
+	if (depth) {
+		hlock = curr->held_locks + depth - 1;
+		if (hlock->class_idx == class_idx && nest_lock) {
+			if (hlock->references)
+				hlock->references++;
+			else
+				hlock->references = 2;
+
+			return 1;
+		}
+	}
+
 	hlock = curr->held_locks + depth;
 	if (DEBUG_LOCKS_WARN_ON(!class))
 		return 0;
-	hlock->class_idx = class - lock_classes + 1;
+	hlock->class_idx = class_idx;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
 	hlock->nest_lock = nest_lock;
@@ -2773,6 +2789,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->read = read;
 	hlock->check = check;
 	hlock->hardirqs_off = !!hardirqs_off;
+	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = sched_clock();
@@ -2881,6 +2898,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+	if (hlock->instance == lock)
+		return 1;
+
+	if (hlock->references) {
+		struct lock_class *class = lock->class_cache;
+
+		if (!class)
+			class = look_up_lock_class(lock, 0);
+
+		if (DEBUG_LOCKS_WARN_ON(!class))
+			return 0;
+
+		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+			return 0;
+
+		if (hlock->class_idx == class - lock_classes + 1)
+			return 1;
+	}
+
+	return 0;
+}
+
 static int
 __lock_set_class(struct lockdep_map *lock, const char *name,
 		 struct lock_class_key *key, unsigned int subclass,
@@ -2904,7 +2945,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 		 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -2923,7 +2964,8 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->nest_lock, hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip,
+				hlock->references))
 			return 0;
 	}
 
@@ -2962,20 +3004,34 @@ lock_release_non_nested(struct task_struct *curr,
 		 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
 	return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
-	lock_release_holdtime(hlock);
+	if (hlock->instance == lock)
+		lock_release_holdtime(hlock);
+
+	if (hlock->references) {
+		hlock->references--;
+		if (hlock->references) {
+			/*
+			 * We had, and after removing one, still have
+			 * references, the current lock stack is still
+			 * valid. We're done!
+			 */
+			return 1;
+		}
+	}
 
 	/*
 	 * We have the right lock to unlock, 'hlock' points to it.
 	 * Now we remove it from the stack, and add back the other
 	 * entries (if any), recalculating the hash along the way:
 	 */
+
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
@@ -2984,7 +3040,8 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->nest_lock, hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip,
+				hlock->references))
 			return 0;
 	}
 
@@ -3014,7 +3071,7 @@ static int lock_release_nested(struct task_struct *curr,
 	/*
 	 * Is the unlock non-nested:
 	 */
-	if (hlock->instance != lock)
+	if (hlock->instance != lock || hlock->references)
 		return lock_release_non_nested(curr, lock, ip);
 	curr->lockdep_depth--;
 
@@ -3065,7 +3122,9 @@ static int __lock_is_held(struct lockdep_map *lock)
 	int i;
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
-		if (curr->held_locks[i].instance == lock)
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock))
 			return 1;
 	}
 
@@ -3148,7 +3207,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	current->lockdep_recursion = 1;
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip);
+		       irqs_disabled_flags(flags), nest_lock, ip, 0);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -3252,7 +3311,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 		 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -3260,6 +3319,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 		return;
 
 found_it:
+	if (hlock->instance != lock)
+		return;
+
 	hlock->waittime_stamp = sched_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3299,7 +3361,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -3307,6 +3369,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		return;
 
 found_it:
+	if (hlock->instance != lock)
+		return;
+
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
 		now = sched_clock();