commit     1fb00c6cbd8356f43b46322742f3c01c2a1f02da
tree       d337fb8dca27a719221d9012292e72c55e7267d1 /kernel
parent     20c5426f8155a89b6df06325f9b278f5052b8c7e
author     Ingo Molnar <mingo@elte.hu>             2006-06-26 03:24:31 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-26 12:58:16 -0400
[PATCH] work around ppc64 bootup bug by making mutex-debugging save/restore irqs
It seems ppc64 locks mutexes in early bootup code with interrupts disabled,
and that code expects interrupts to stay disabled across the mutex
operations; otherwise it crashes.  Work around this bug by making the
mutex-debugging variants save/restore irq flags.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
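
The crux of the fix: the old debug macros wrapped the internal debug lock in
local_irq_disable()/local_irq_enable(), so unlocking unconditionally
re-enabled interrupts even for callers that entered with them disabled; the
save/restore variants hand the caller's previous irq state back unchanged.
A minimal user-space sketch of the two semantics (the irqs_enabled flag and
the helper names below are illustrative stand-ins, not kernel APIs):

	#include <stdio.h>
	#include <stdbool.h>

	static bool irqs_enabled = true;	/* simulated CPU interrupt state */

	/* Old disable/enable style: unlock always re-enables interrupts. */
	static void lock_disable(void)  { irqs_enabled = false; }
	static void unlock_enable(void) { irqs_enabled = true; }  /* clobbers caller state */

	/* New save/restore style: unlock restores whatever the caller had. */
	static void lock_save(bool *flags)      { *flags = irqs_enabled; irqs_enabled = false; }
	static void unlock_restore(bool *flags) { irqs_enabled = *flags; }

	int main(void)
	{
		bool flags;

		irqs_enabled = false;		/* early-boot caller: irqs already off */
		lock_disable();
		unlock_enable();
		printf("disable/enable pair: irqs %s after unlock\n",
		       irqs_enabled ? "ON" : "off");

		irqs_enabled = false;
		lock_save(&flags);
		unlock_restore(&flags);
		printf("save/restore pair:   irqs %s after unlock\n",
		       irqs_enabled ? "ON" : "off");
		return 0;
	}

Run with interrupts "already off", the disable/enable pair exits with them
back on (the ppc64 bootup crash scenario); the save/restore pair leaves them
off, which is what the early boot code relies on.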
Diffstat (limited to 'kernel')

 kernel/mutex-debug.c | 12
 kernel/mutex-debug.h | 25
 kernel/mutex.c       | 21
 kernel/mutex.h       |  6
 4 files changed, 27 insertions(+), 37 deletions(-)
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index f4913c376950..036b6285b15c 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -153,13 +153,13 @@ next:
 			continue;
 		count++;
 		cursor = curr->next;
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("\n#%03d: ", count);
 		printk_lock(lock, filter ? 0 : 1);
 		goto next;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 	printk("\n");
 }
 
@@ -316,7 +316,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
 			continue;
 		list_del_init(curr);
 		DEBUG_OFF();
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("BUG: %s/%d, lock held at task exit time!\n",
 			task->comm, task->pid);
@@ -325,7 +325,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
 			printk("exiting task is not even the owner??\n");
 		return;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
@@ -352,7 +352,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 			continue;
 		list_del_init(curr);
 		DEBUG_OFF();
-		debug_spin_lock_restore(&debug_mutex_lock, flags);
+		debug_spin_unlock_restore(&debug_mutex_lock, flags);
 
 		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
 			current->comm, current->pid, lock, from, to);
@@ -362,7 +362,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 			printk("freeing task is not even the owner??\n");
 		return;
 	}
-	debug_spin_lock_restore(&debug_mutex_lock, flags);
+	debug_spin_unlock_restore(&debug_mutex_lock, flags);
 }
 
 /*
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index fd384050acb1..a5196c36a5fd 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -46,21 +46,6 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name);
 
-#define debug_spin_lock(lock) \
-	do { \
-		local_irq_disable(); \
-		if (debug_mutex_on) \
-			spin_lock(lock); \
-	} while (0)
-
-#define debug_spin_unlock(lock) \
-	do { \
-		if (debug_mutex_on) \
-			spin_unlock(lock); \
-		local_irq_enable(); \
-		preempt_check_resched(); \
-	} while (0)
-
 #define debug_spin_lock_save(lock, flags) \
 	do { \
 		local_irq_save(flags); \
@@ -68,7 +53,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
 			spin_lock(lock); \
 	} while (0)
 
-#define debug_spin_lock_restore(lock, flags) \
+#define debug_spin_unlock_restore(lock, flags) \
 	do { \
 		if (debug_mutex_on) \
 			spin_unlock(lock); \
@@ -76,20 +61,20 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
 		preempt_check_resched(); \
 	} while (0)
 
-#define spin_lock_mutex(lock) \
+#define spin_lock_mutex(lock, flags) \
 	do { \
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
 		\
 		DEBUG_WARN_ON(in_interrupt()); \
-		debug_spin_lock(&debug_mutex_lock); \
+		debug_spin_lock_save(&debug_mutex_lock, flags); \
 		spin_lock(lock); \
 		DEBUG_WARN_ON(l->magic != l); \
 	} while (0)
 
-#define spin_unlock_mutex(lock) \
+#define spin_unlock_mutex(lock, flags) \
 	do { \
 		spin_unlock(lock); \
-		debug_spin_unlock(&debug_mutex_lock); \
+		debug_spin_unlock_restore(&debug_mutex_lock, flags); \
 	} while (0)
 
 #define DEBUG_OFF() \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5449b210d9ed..7043db21bbce 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -125,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
+	unsigned long flags;
 
 	debug_mutex_init_waiter(&waiter);
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
 
@@ -157,7 +158,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 					signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task->thread_info);
-			spin_unlock_mutex(&lock->wait_lock);
+			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
 			return -EINTR;
@@ -165,9 +166,9 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
-		spin_unlock_mutex(&lock->wait_lock);
+		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule();
-		spin_lock_mutex(&lock->wait_lock);
+		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 	/* got the lock - rejoice! */
@@ -178,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
 
@@ -203,10 +204,11 @@ static fastcall noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 
 	DEBUG_WARN_ON(lock->owner != current_thread_info());
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -231,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 
 	debug_mutex_clear_owner(lock);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
 /*
@@ -276,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 	int prev;
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
@@ -287,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	return prev == 1;
 }
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 00fe84e7b672..069189947257 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -9,8 +9,10 @@
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define spin_lock_mutex(lock)		spin_lock(lock)
-#define spin_unlock_mutex(lock)		spin_unlock(lock)
+#define spin_lock_mutex(lock, flags) \
+		do { spin_lock(lock); (void)(flags); } while (0)
+#define spin_unlock_mutex(lock, flags) \
+		do { spin_unlock(lock); (void)(flags); } while (0)
 #define mutex_remove_waiter(lock, waiter, ti) \
 	__list_del((waiter)->list.prev, (waiter)->list.next)
 