Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r--  kernel/rtmutex.c  41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a6fbb4130521..17d28ce20300 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -56,7 +56,7 @@
  * state.
  */
 
-void
+static void
 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
 		   unsigned long mask)
 {
@@ -81,6 +81,29 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 }
 
 /*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	do {
+		owner = *p;
+	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n)	(0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
  * Calculate task priority from the waiter list priority
  *
  * Return task->normal_prio when the waiter list is empty or when
@@ -100,7 +123,7 @@ int rt_mutex_getprio(struct task_struct *task)
  *
  * This can be both boosting and unboosting. task->pi_lock must be held.
  */
-void __rt_mutex_adjust_prio(struct task_struct *task)
+static void __rt_mutex_adjust_prio(struct task_struct *task)
 {
 	int prio = rt_mutex_getprio(task);
 
@@ -136,11 +159,11 @@ int max_lock_depth = 1024;
  * Decreases task's usage by one - may thus free the task.
  * Returns 0 or -EDEADLK.
  */
-int rt_mutex_adjust_prio_chain(struct task_struct *task,
-			       int deadlock_detect,
-			       struct rt_mutex *orig_lock,
-			       struct rt_mutex_waiter *orig_waiter,
-			       struct task_struct *top_task)
+static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+				      int deadlock_detect,
+				      struct rt_mutex *orig_lock,
+				      struct rt_mutex_waiter *orig_waiter,
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -514,8 +537,8 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
  *
  * Must be called with lock->wait_lock held
  */
-void remove_waiter(struct rt_mutex *lock,
-		   struct rt_mutex_waiter *waiter)
+static void remove_waiter(struct rt_mutex *lock,
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
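
For readers skimming the hunk that introduces rt_mutex_cmpxchg() and mark_rt_mutex_waiters(): the fast path hinges on a single compare-and-swap of the lock's owner word, with the RT_MUTEX_HAS_WAITERS flag packed into a low bit that pointer alignment leaves free. The sketch below is a minimal userspace C11 rendering of that idea so it can be compiled and run standalone; the names demo_lock, demo_trylock() and demo_mark_waiters(), and the simplified owner-word layout, are hypothetical illustrations and not code from this patch or from rtmutex.c.

/*
 * Userspace sketch of the cmpxchg fast-path idea, using C11 atomics in
 * place of the kernel's cmpxchg(). Illustrative names only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL		/* low bit of the owner word, like RT_MUTEX_HAS_WAITERS */

struct demo_lock {
	_Atomic uintptr_t owner;	/* 0 == unlocked, else owner "pointer" | flag bit */
};

/* Fast path: one CAS from 0 to "me"; fails (returns 0) if contended. */
static int demo_trylock(struct demo_lock *lock, uintptr_t me)
{
	uintptr_t expected = 0;

	return atomic_compare_exchange_strong(&lock->owner, &expected, me);
}

/* Analogue of mark_rt_mutex_waiters(): set the flag bit with a CAS retry loop. */
static void demo_mark_waiters(struct demo_lock *lock)
{
	uintptr_t owner = atomic_load(&lock->owner);

	/* A failed CAS reloads 'owner' with the current value, so just retry. */
	while (!atomic_compare_exchange_weak(&lock->owner, &owner,
					     owner | HAS_WAITERS))
		;
}

int main(void)
{
	struct demo_lock lock = { 0 };
	uintptr_t me = 0x1000;	/* stand-in for an aligned task_struct pointer */

	if (demo_trylock(&lock, me))
		printf("fast path: acquired, owner=%#lx\n",
		       (unsigned long)atomic_load(&lock.owner));

	demo_mark_waiters(&lock);	/* a blocked contender would do this */
	printf("after mark_waiters: owner=%#lx\n",
	       (unsigned long)atomic_load(&lock.owner));
	return 0;
}

Note also that in the #else branch of the hunk (no __HAVE_ARCH_CMPXCHG, or CONFIG_DEBUG_RT_MUTEXES enabled) rt_mutex_cmpxchg() is defined to 0, so the fast path always fails and every acquire/release takes the slow path; the plain, non-atomic store in that variant of mark_rt_mutex_waiters() relies on the slow path holding lock->wait_lock around it.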