Diffstat (limited to 'kernel/mutex.c')
 -rw-r--r--   kernel/mutex.c | 52
 1 file changed, 28 insertions, 24 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 101ceeb38925..3aad0b7992f4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,7 +39,7 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
@@ -56,7 +57,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +80,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
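
The two hunks above touch mutex_lock()'s fastpath/slowpath split: the fastpath is a single atomic decrement of lock->count, and __mutex_lock_slowpath() is only entered when that decrement shows contention (the count convention is 1 = unlocked, 0 = locked, negative = locked with possible waiters). Below is a minimal user-space sketch of that count protocol, assuming C11 atomics; the toy_* names and the spinning slowpath are invented stand-ins for the arch __mutex_fastpath_*() helpers and the sleeping slowpath, not kernel code.

/* Toy model of the count protocol (assumption: 1 = unlocked, 0 = locked,
 * negative = locked and possibly contended).  Illustrative only. */
#include <stdatomic.h>
#include <sched.h>

struct toy_mutex { atomic_int count; };		/* initialise count to 1 */

static void toy_lock(struct toy_mutex *m)
{
	/* fastpath: a single atomic decrement, no spinlock, no wait list */
	if (atomic_fetch_sub(&m->count, 1) == 1)
		return;
	/* slowpath: xchg the count to -1 until the old value is 1 (free);
	 * -1 tells the unlocker that someone may need waking */
	while (atomic_exchange(&m->count, -1) != 1)
		sched_yield();		/* the kernel sleeps here instead */
}

static void toy_unlock(struct toy_mutex *m)
{
	/* fastpath: one atomic increment; a previous value below zero means
	 * the lock was contended and the slowpath must restore "unlocked" */
	if (atomic_fetch_add(&m->count, 1) < 0)
		atomic_store(&m->count, 1);
}

In the real slowpath the task is queued on lock->wait_list and schedule()d out, which is what the __mutex_lock_common() hunks below deal with.
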
@@ -92,7 +93,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -120,18 +121,17 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
 	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
-
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -173,7 +173,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	debug_mutex_set_owner(lock, task->thread_info);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +183,28 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static fastcall inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-
 	spin_lock_mutex(&lock->wait_lock, flags);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +214,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
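
Together, the lock and unlock slowpaths shown above implement FIFO handoff: __mutex_lock_common() appends the waiter with list_add_tail(), and the unlock slowpath wakes the first entry of lock->wait_list. The sketch below is a rough user-space model of that queueing discipline with invented toy_* names: an atomic flag stands in for lock->wait_lock and busy-waiting stands in for schedule(). It is illustrative, not the kernel code.

#include <stdatomic.h>
#include <sched.h>
#include <stddef.h>

struct toy_waiter {
	struct toy_waiter *next;
	atomic_int woken;
};

struct toy_waitq {
	atomic_flag guard;			/* stands in for lock->wait_lock  */
	struct toy_waiter *head, *tail;		/* stands in for lock->wait_list  */
};						/* init: ATOMIC_FLAG_INIT, NULLs  */

static void guard_lock(struct toy_waitq *q)
{
	while (atomic_flag_test_and_set(&q->guard))
		sched_yield();
}

static void guard_unlock(struct toy_waitq *q)
{
	atomic_flag_clear(&q->guard);
}

/* enqueue at the tail (FIFO), like list_add_tail() in the lock slowpath */
static void toy_wait(struct toy_waitq *q)
{
	struct toy_waiter self = { .next = NULL, .woken = 0 };

	guard_lock(q);
	if (q->tail)
		q->tail->next = &self;
	else
		q->head = &self;
	q->tail = &self;
	guard_unlock(q);

	while (!atomic_load(&self.woken))
		sched_yield();			/* the kernel schedule()s here */
}

/* dequeue and wake the first entry, like the unlock slowpath above */
static void toy_wake_first(struct toy_waitq *q)
{
	struct toy_waiter *w;

	guard_lock(q);
	w = q->head;
	if (w) {
		q->head = w->next;
		if (!q->head)
			q->tail = NULL;
		atomic_store(&w->woken, 1);
	}
	guard_unlock(q);
}
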
@@ -237,11 +231,20 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 }
 
 /*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count);
+}
+
+/*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
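
This hunk turns __mutex_unlock_slowpath() into a noinline wrapper around the now-inline __mutex_unlock_common_slowpath(), presumably so the fastpath keeps a single out-of-line entry point while the common body can be inlined where it is called directly. A sketch of that wrapper-around-inline-helper pattern in plain GCC C, with invented names:

/* do_unlock_common() is folded into every direct caller, while
 * unlock_slowpath() stays a real out-of-line function the fastpath can
 * branch to.  Names are illustrative, not from the kernel. */
static inline __attribute__((always_inline)) void do_unlock_common(int *count)
{
	/* ... the shared slowpath body would live here ... */
	*count = 1;
}

static __attribute__((noinline)) void unlock_slowpath(int *count)
{
	do_unlock_common(count);	/* exactly one out-of-line copy */
}
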
@@ -264,11 +267,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
 }
 
 /*
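
__mutex_lock_interruptible_slowpath() passes TASK_INTERRUPTIBLE into the common slowpath, so a sleeping waiter can be woken by a signal and the call returns an error instead of blocking until the mutex is free. A typical caller pattern looks like the sketch below; the my_dev structure and its lock field are hypothetical, only mutex_lock_interruptible()/mutex_unlock() are the real API.

/* Hypothetical caller: give up on the lock if a signal arrives rather
 * than sleeping uninterruptibly. */
static int my_dev_op(struct my_dev *dev)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->lock);
	if (ret)
		return ret;	/* interrupted by a signal, lock not taken */

	/* ... critical section ... */

	mutex_unlock(&dev->lock);
	return 0;
}
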
@@ -285,7 +288,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+		debug_mutex_set_owner(lock, current_thread_info());
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
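
The trylock slowpath above claims the mutex with one atomic_xchg(): writing -1 and reading back 1 means the lock was free and now belongs to the caller, and the count is dropped back to 0 when no waiters are queued. Continuing the earlier user-space sketch (same hypothetical toy_mutex, not kernel code):

/* Toy xchg-based trylock, mirroring __mutex_trylock_slowpath() above. */
static int toy_trylock(struct toy_mutex *m)
{
	/* write -1; an old value of 1 means the lock was free and is ours */
	int prev = atomic_exchange(&m->count, -1);

	if (prev == 1)
		atomic_store(&m->count, 0);	/* locked, no waiters in this toy */

	return prev == 1;
}
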