Diffstat (limited to 'kernel/rtmutex.c')
 kernel/rtmutex.c | 989 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 989 insertions(+), 0 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
new file mode 100644
index 000000000000..d2ef13b485e7
--- /dev/null
+++ b/kernel/rtmutex.c
@@ -0,0 +1,989 @@
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner        bit1    bit0
 * NULL         0       0       lock is free (fast acquire possible)
 * NULL         0       1       invalid state
 * NULL         1       0       Transitional state*
 * NULL         1       1       invalid state
 * taskpointer  0       0       lock is held (fast release possible)
 * taskpointer  0       1       task is pending owner
 * taskpointer  1       0       lock is held and has waiters
 * taskpointer  1       1       task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock,
 * which puts the woken-up thread back on the waiters list.
 *
 * The fast atomic compare-exchange based acquire and release is only
 * possible when bits 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set
 * this bit before looking at the lock, which is why this is a
 * transitional state.
 */
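
For reference, these state bits are decoded by helpers in the companion
header rtmutex_common.h. A sketch of their likely shape, assuming the
usual RT_MUTEX_OWNER_PENDING (bit 0) / RT_MUTEX_HAS_WAITERS (bit 1)
definitions from that header (not part of this file):

    #define RT_MUTEX_OWNER_PENDING  1UL     /* bit 0 */
    #define RT_MUTEX_HAS_WAITERS    2UL     /* bit 1 */
    #define RT_MUTEX_OWNER_MASKALL  3UL

    /* Strip both state bits to recover the owner task pointer. */
    static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
    {
            return (struct task_struct *)
                    ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
    }

    /* Nonzero while the woken top waiter has not taken the lock yet. */
    static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
    {
            return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
    }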

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                   unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
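
The cmpxchg loop above only retries when another CPU modified lock->owner
between the load and the exchange, so each retry reflects real progress by
someone else. A self-contained userspace analogue using C11 atomics
(hypothetical names, illustration only, not kernel code):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define HAS_WAITERS 2UL

    /* Atomically OR the waiters bit into an owner word, preserving
     * whatever owner pointer / pending bit is already encoded there. */
    static void mark_waiters(_Atomic uintptr_t *owner)
    {
            uintptr_t old = atomic_load(owner);

            /* On failure the CAS reloads 'old', so we retry with
             * the freshly observed value. */
            while (!atomic_compare_exchange_weak(owner, &old,
                                                 old | HAS_WAITERS))
                    ;
    }

    int main(void)
    {
            _Atomic uintptr_t owner = 0x1000;       /* fake task pointer */

            mark_waiters(&owner);
            assert(atomic_load(&owner) == (0x1000 | HAS_WAITERS));
            return 0;
    }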

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting.
 * (Kernel priorities are inverted: a lower number means a higher
 * priority, hence the min() below.)
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases the task's usage count by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, top_task->pid);
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task cannot go away as we did a get_task_struct() before!
         */
        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter || !waiter->task)
                goto out_unlock_pi;

        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary at all.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!spin_trylock(&lock->wait_lock)) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        get_task_struct(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
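
To see what the walk computes, consider a boosting-only toy model in
plain C (hypothetical toy_* types, not kernel code): task A (prio 10,
lower number = higher priority) blocks on a lock owned by B (prio 20),
which in turn blocks on a lock owned by C (prio 30). Walking the chain
propagates prio 10 to both B and C:

    #include <stdio.h>

    struct toy_lock;

    struct toy_task {
            const char *name;
            int normal_prio;        /* a full model would deboost back to this */
            int prio;               /* effective (possibly boosted) priority */
            struct toy_lock *blocked_on;
    };

    struct toy_lock {
            struct toy_task *owner;
            int top_waiter_prio;    /* best priority among the waiters */
    };

    /* Boosting-only sketch of the chain walk: requeue the waiter with
     * its current priority, boost the owner, step to the next lock. */
    static void toy_adjust_prio_chain(struct toy_task *task)
    {
            while (task->blocked_on) {
                    struct toy_lock *lock = task->blocked_on;

                    if (task->prio < lock->top_waiter_prio)
                            lock->top_waiter_prio = task->prio;

                    task = lock->owner;
                    if (lock->top_waiter_prio < task->prio)
                            task->prio = lock->top_waiter_prio;
            }
    }

    int main(void)
    {
            struct toy_task a = { "A", 10, 10, NULL };
            struct toy_task b = { "B", 20, 20, NULL };
            struct toy_task c = { "C", 30, 30, NULL };
            struct toy_lock l1 = { &b, 99 }, l2 = { &c, 99 };

            a.blocked_on = &l1;
            b.blocked_on = &l2;

            toy_adjust_prio_chain(&a);
            printf("B: %d, C: %d\n", b.prio, c.prio);   /* both boosted to 10 */
            return 0;
    }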

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
        struct task_struct *pendowner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *next;
        unsigned long flags;

        if (!rt_mutex_owner_pending(lock))
                return 0;

        if (pendowner == current)
                return 1;

        spin_lock_irqsave(&pendowner->pi_lock, flags);
        if (current->prio >= pendowner->prio) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 0;
        }

        /*
         * Check if a waiter is enqueued on the pending owner's
         * pi_waiters list. Remove it and readjust the pending
         * owner's priority.
         */
        if (likely(!rt_mutex_has_waiters(lock))) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 1;
        }

        /* No chain handling, pending owner is not blocked on anything: */
        next = rt_mutex_top_waiter(lock);
        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
        __rt_mutex_adjust_prio(pendowner);
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        /*
         * We are going to steal the lock and a waiter was
         * enqueued on the pending owner's pi_waiters queue. So
         * we have to enqueue this waiter into the
         * current->pi_waiters list. This covers the case where
         * current is boosted because it holds another lock and
         * gets unboosted because the booster is interrupted, so
         * we would otherwise delay a waiter with higher priority
         * than current->normal_prio.
         *
         * Note: in the rare case of a SCHED_OTHER task changing
         * its priority and thus stealing the lock, next->task
         * might be current:
         */
        if (likely(next->task != current)) {
                spin_lock_irqsave(&current->pi_lock, flags);
                plist_add(&next->pi_list_entry, &current->pi_waiters);
                __rt_mutex_adjust_prio(current);
                spin_unlock_irqrestore(&current->pi_lock, flags);
        }
        return 1;
}

/*
 * Try to take an rt-mutex.
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         * - no other waiter is on the lock
         * - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * any more. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
                return 0;

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, current, 0);

        rt_mutex_deadlock_account_lock(lock, current);

        return 1;
}
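
One interleaving the WAITERS bit guards against, sketched below
(illustration only, not from the patch): setting the bit forces the
releasing CPU out of the lock-free fast path and onto wait_lock, which
the contender already holds.

    CPU 0 (contender)                  CPU 1 (owner T1, releasing)
    -----------------                  ---------------------------
    takes lock->wait_lock
    mark_rt_mutex_waiters()
      owner = T1 | HAS_WAITERS
                                       rt_mutex_cmpxchg(lock, T1, NULL)
                                         fails (bit 1 is set), so the
                                         release falls into the slowpath,
                                         which serializes on wait_lock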

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int boost = 0, res;

        spin_lock_irqsave(&current->pi_lock, flags);
        __rt_mutex_adjust_prio(current);
        waiter->task = current;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, current->prio);
        plist_node_init(&waiter->pi_list_entry, current->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        current->pi_blocked_on = waiter;

        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on) {
                        boost = 1;
                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                        get_task_struct(owner);
                }
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                spin_lock_irqsave(&owner->pi_lock, flags);
                if (owner->pi_blocked_on) {
                        boost = 1;
                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                        get_task_struct(owner);
                }
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        if (!boost)
                return 0;

        spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         current);

        spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock's waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
        unsigned long flags;

        spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);
        plist_del(&waiter->list_entry, &lock->wait_list);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
        pendowner = waiter->task;
        waiter->task = NULL;

        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

        spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * Clear the pi_blocked_on variable and enqueue a possible
         * waiter into the pi_waiters list of the pending owner. This
         * prevents a waiter with higher priority than
         * pending-owner->normal_prio from staying blocked on the
         * unboosted (pending) owner in case it gets unboosted.
         */
        spin_lock_irqsave(&pendowner->pi_lock, flags);

        WARN_ON(!pendowner->pi_blocked_on);
        WARN_ON(pendowner->pi_blocked_on != waiter);
        WARN_ON(pendowner->pi_blocked_on->lock != lock);

        pendowner->pi_blocked_on = NULL;

        if (rt_mutex_has_waiters(lock)) {
                struct rt_mutex_waiter *next;

                next = rt_mutex_top_waiter(lock);
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
        }
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int boost = 0;

        spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->task = NULL;
        current->pi_blocked_on = NULL;
        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (first && owner != current) {

                spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on) {
                        boost = 1;
                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                        get_task_struct(owner);
                }
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!boost)
                return;

        spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);

        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock)) {
                spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Set up the timer when timeout != NULL: */
        if (unlikely(timeout))
                hrtimer_start(&timeout->timer, timeout->timer.expires,
                              HRTIMER_ABS);

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                /*
                 * waiter.task is NULL the first time we come here and
                 * when we have been woken up by the previous owner
                 * but the lock got stolen by a higher prio task.
                 */
                if (!waiter.task) {
                        ret = task_blocks_on_rt_mutex(lock, &waiter,
                                                      detect_deadlock);
                        /*
                         * If we got woken up by the owner then start the
                         * loop all over without going into schedule(), to
                         * try to get the lock now:
                         */
                        if (unlikely(!waiter.task))
                                continue;

                        if (unlikely(ret))
                                break;
                }

                spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(&waiter);

                if (waiter.task)
                        schedule_rt_mutex(lock);

                spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        /*
         * Readjust priority, when we did not get the lock. We might
         * have been the pending owner and boosted. Since we did not
         * take the lock, the PI boost has to go.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * Debug-aware fast / slowpath lock, trylock and unlock functions.
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout;
 *                       the timeout structure is provided by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
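
A sketch of a caller, assuming the hrtimer interfaces of this kernel
generation (hrtimer_init(), hrtimer_init_sleeper(), HRTIMER_ABS);
lock_with_deadline() and 'expiry' are made-up names:

    /* Kernel context assumed; fragment, not a complete module. */
    static int lock_with_deadline(struct rt_mutex *lock, ktime_t expiry)
    {
            struct hrtimer_sleeper t;

            hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_ABS);
            hrtimer_init_sleeper(&t, current);      /* sets t.task = current */
            t.timer.expires = expiry;

            /* On expiry the timer clears t.task, which trips the
             * timeout check in rt_mutex_slowlock() above and yields
             * -ETIMEDOUT. */
            return rt_mutex_timed_lock(lock, &t, 0);
    }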

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
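
A minimal usage sketch of the public API; DEFINE_RT_MUTEX() is the
static initializer from <linux/rtmutex.h>, while my_rt_lock and
do_work() are made-up names:

    static DEFINE_RT_MUTEX(my_rt_lock);

    static void do_work(void)
    {
            rt_mutex_lock(&my_rt_lock);     /* may sleep; boosts a lower
                                             * priority owner while we wait */
            /* ... critical section ... */
            rt_mutex_unlock(&my_rt_lock);   /* undoes any PI boost we got */

            if (rt_mutex_trylock(&my_rt_lock)) {    /* returns 1 on success */
                    /* ... */
                    rt_mutex_unlock(&my_rt_lock);
            }
    }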

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to the unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf
 *                              of a proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support.
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner, 0);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 * @proxy_owner: the task that was set as proxy owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support.
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL, 0);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}
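
A hypothetical PI-futex style pairing of the two proxy calls (sketch
only; pi_mutex and owner are made-up names, and the caller provides all
serialization):

    /* Make 'owner' the lock owner without it ever calling
     * rt_mutex_lock(), so contenders can block on pi_mutex
     * and boost 'owner' through the PI chain. */
    rt_mutex_init_proxy_locked(&pi_mutex, owner);

    /* ... contenders block via the slowpath, owner gets boosted ... */

    /* Release on the owner's behalf; undoes the accounting done
     * by the proxy lock above. */
    rt_mutex_proxy_unlock(&pi_mutex, owner);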

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL.
 *
 * The caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support.
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}