aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-12-06 23:40:50 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2006-12-07 11:39:46 -0500
commit70e4506765602cca047cfa31933836e354c61a63 (patch)
treeb490e5066404eab1ec7cf5147521c0bd53226b07 /kernel
parent72be2ccfff0e0e332b32f7ef8372890e39b7c4cb (diff)
[PATCH] lockdep: register_lock_class() fix
The hash_lock must only ever be taken with irqs disabled. This happens in all the important places, except one codepath: register_lock_class(). The race should trigger rarely because register_lock_class() is quite rare and single-threaded (happens during init most of the time). The fix is to disable irqs. ( bug found live in -rt: there preemption is a lot more aggressive and preempting with the hash-lock held caused a lockup.) Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/lockdep.c6
1 files changed, 6 insertions, 0 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3926c3674354..62e73ce68197 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1182,6 +1182,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1182 struct lockdep_subclass_key *key; 1182 struct lockdep_subclass_key *key;
1183 struct list_head *hash_head; 1183 struct list_head *hash_head;
1184 struct lock_class *class; 1184 struct lock_class *class;
1185 unsigned long flags;
1185 1186
1186 class = look_up_lock_class(lock, subclass); 1187 class = look_up_lock_class(lock, subclass);
1187 if (likely(class)) 1188 if (likely(class))
@@ -1203,6 +1204,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1203 key = lock->key->subkeys + subclass; 1204 key = lock->key->subkeys + subclass;
1204 hash_head = classhashentry(key); 1205 hash_head = classhashentry(key);
1205 1206
1207 raw_local_irq_save(flags);
1206 __raw_spin_lock(&hash_lock); 1208 __raw_spin_lock(&hash_lock);
1207 /* 1209 /*
1208 * We have to do the hash-walk again, to avoid races 1210 * We have to do the hash-walk again, to avoid races
@@ -1217,6 +1219,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1217 */ 1219 */
1218 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 1220 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1219 __raw_spin_unlock(&hash_lock); 1221 __raw_spin_unlock(&hash_lock);
1222 raw_local_irq_restore(flags);
1220 debug_locks_off(); 1223 debug_locks_off();
1221 printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); 1224 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1222 printk("turning off the locking correctness validator.\n"); 1225 printk("turning off the locking correctness validator.\n");
@@ -1239,15 +1242,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1239 1242
1240 if (verbose(class)) { 1243 if (verbose(class)) {
1241 __raw_spin_unlock(&hash_lock); 1244 __raw_spin_unlock(&hash_lock);
1245 raw_local_irq_restore(flags);
1242 printk("\nnew class %p: %s", class->key, class->name); 1246 printk("\nnew class %p: %s", class->key, class->name);
1243 if (class->name_version > 1) 1247 if (class->name_version > 1)
1244 printk("#%d", class->name_version); 1248 printk("#%d", class->name_version);
1245 printk("\n"); 1249 printk("\n");
1246 dump_stack(); 1250 dump_stack();
1251 raw_local_irq_save(flags);
1247 __raw_spin_lock(&hash_lock); 1252 __raw_spin_lock(&hash_lock);
1248 } 1253 }
1249out_unlock_set: 1254out_unlock_set:
1250 __raw_spin_unlock(&hash_lock); 1255 __raw_spin_unlock(&hash_lock);
1256 raw_local_irq_restore(flags);
1251 1257
1252 if (!subclass || force) 1258 if (!subclass || force)
1253 lock->class_cache = class; 1259 lock->class_cache = class;