Diffstat (limited to 'kernel')

 kernel/mutex.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 90 insertions(+), 1 deletion(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 70ebd855d9e8..1dbd4210baef 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -55,6 +55,9 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+	lock->spin_mlock = NULL;
+#endif
 
 	debug_mutex_init(lock, name, key);
 }
@@ -108,6 +111,60 @@ EXPORT_SYMBOL(mutex_lock);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
+ * In order to avoid a stampede of mutex spinners from acquiring the mutex
+ * more or less simultaneously, the spinners need to acquire a MCS lock
+ * first before spinning on the owner field.
+ *
+ * We don't inline mspin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+struct mspin_node {
+	struct mspin_node *next;
+	int		   locked;	/* 1 if lock acquired */
+};
+#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
+
+static noinline
+void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
+{
+	struct mspin_node *prev;
+
+	/* Init node */
+	node->locked = 0;
+	node->next   = NULL;
+
+	prev = xchg(lock, node);
+	if (likely(prev == NULL)) {
+		/* Lock acquired */
+		node->locked = 1;
+		return;
+	}
+	ACCESS_ONCE(prev->next) = node;
+	smp_wmb();
+	/* Wait until the lock holder passes the lock down */
+	while (!ACCESS_ONCE(node->locked))
+		arch_mutex_cpu_relax();
+}
+
+static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
+{
+	struct mspin_node *next = ACCESS_ONCE(node->next);
+
+	if (likely(!next)) {
+		/*
+		 * Release the lock by setting it to NULL
+		 */
+		if (cmpxchg(lock, node, NULL) == node)
+			return;
+		/* Wait until the next pointer is set */
+		while (!(next = ACCESS_ONCE(node->next)))
+			arch_mutex_cpu_relax();
+	}
+	ACCESS_ONCE(next->locked) = 1;
+	smp_wmb();
+}
+
+/*
  * Mutex spinning code migrated from kernel/sched/core.c
  */
 
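Aside (not part of the patch): the mspin_lock()/mspin_unlock() pair above is a classic MCS queue lock. For readers who haven't met the technique, here is a minimal standalone sketch of the same protocol written with portable C11 atomics instead of the kernel's xchg()/cmpxchg()/ACCESS_ONCE() primitives; the names (mcs_node, mcs_lock, mcs_unlock, tail) are illustrative only. The point of the structure is that each waiter spins on a flag in its own stack-allocated node, so the hand-off from one spinner to the next touches a single private cacheline instead of having every CPU hammer one shared word.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;	/* successor in the queue, if any */
	atomic_bool locked;			/* set to true when the lock is handed to us */
};

/* Append ourselves to the queue and wait until we reach its head. */
static void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->locked, false, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Swap ourselves in as the new tail (the xchg() in mspin_lock()). */
	prev = atomic_exchange_explicit(tail, node, memory_order_acq_rel);
	if (prev == NULL)
		return;				/* queue was empty: lock acquired */

	/* Publish our node to the predecessor, then spin on our own flag. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;				/* a cpu_relax()-style pause would go here */
}

/* Hand the lock to our successor, or reset the queue if there is none. */
static void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

	if (next == NULL) {
		struct mcs_node *expected = node;

		/* No visible successor: try to swing the tail back to empty. */
		if (atomic_compare_exchange_strong_explicit(tail, &expected, NULL,
							    memory_order_acq_rel,
							    memory_order_acquire))
			return;
		/* A new waiter raced in; wait for it to link itself behind us. */
		while ((next = atomic_load_explicit(&node->next, memory_order_acquire)) == NULL)
			;
	}
	atomic_store_explicit(&next->locked, true, memory_order_release);
}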
@@ -150,6 +207,24 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 	 */
 	return lock->owner == NULL;
 }
+
+/*
+ * Initial check for entering the mutex spinning loop
+ */
+static inline int mutex_can_spin_on_owner(struct mutex *lock)
+{
+	int retval = 1;
+
+	rcu_read_lock();
+	if (lock->owner)
+		retval = lock->owner->on_cpu;
+	rcu_read_unlock();
+	/*
+	 * if lock->owner is not set, the mutex owner may have just acquired
+	 * it and not set the owner yet or the mutex has been released.
+	 */
+	return retval;
+}
 #endif
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -215,26 +290,39 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 *
 	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 	 * to serialize everything.
+	 *
+	 * The mutex spinners are queued up using MCS lock so that only one
+	 * spinner can compete for the mutex. However, if mutex spinning isn't
+	 * going to happen, there is no point in going through the lock/unlock
+	 * overhead.
 	 */
+	if (!mutex_can_spin_on_owner(lock))
+		goto slowpath;
 
 	for (;;) {
 		struct task_struct *owner;
+		struct mspin_node  node;
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
+		mspin_lock(MLOCK(lock), &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner))
+		if (owner && !mutex_spin_on_owner(lock, owner)) {
+			mspin_unlock(MLOCK(lock), &node);
 			break;
+		}
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
 			mutex_set_owner(lock);
+			mspin_unlock(MLOCK(lock), &node);
 			preempt_enable();
 			return 0;
 		}
+		mspin_unlock(MLOCK(lock), &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -253,6 +341,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
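Continuing the aside: a minimal, made-up usage of the mcs_lock()/mcs_unlock() sketch above (it compiles together with that snippet), mirroring how __mutex_lock_common() now brackets each spinning attempt with mspin_lock()/mspin_unlock() on a stack-allocated node. The pthread harness, thread count and counter are all hypothetical.

#include <pthread.h>
#include <stdio.h>

static _Atomic(struct mcs_node *) queue_tail;	/* plays the role of lock->spin_mlock */
static long counter;				/* stand-in for work done while holding the queue lock */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		struct mcs_node node;		/* per-attempt, on the stack, like mspin_node */

		mcs_lock(&queue_tail, &node);
		counter++;			/* only the queue head executes this section */
		mcs_unlock(&queue_tail, &node);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("counter = %ld\n", counter);	/* expect 400000 */
	return 0;
}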